2024-12-11 04:26:15,391 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-12-11 04:26:15,407 main DEBUG Took 0.013478 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-11 04:26:15,408 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-11 04:26:15,408 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-11 04:26:15,409 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-11 04:26:15,411 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 04:26:15,421 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-11 04:26:15,437 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 04:26:15,439 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 04:26:15,440 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 04:26:15,440 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 04:26:15,441 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 04:26:15,441 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 04:26:15,443 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 04:26:15,443 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 04:26:15,444 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 04:26:15,444 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 04:26:15,445 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 04:26:15,445 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 04:26:15,446 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 04:26:15,447 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-11 04:26:15,447 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 04:26:15,447 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 04:26:15,448 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 04:26:15,449 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 04:26:15,449 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 04:26:15,449 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 04:26:15,450 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 04:26:15,450 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 04:26:15,451 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 04:26:15,451 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 04:26:15,452 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 04:26:15,452 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-11 04:26:15,454 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 04:26:15,455 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-11 04:26:15,457 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-11 04:26:15,457 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-11 04:26:15,458 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-11 04:26:15,459 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-11 04:26:15,467 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-11 04:26:15,469 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-11 04:26:15,471 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-11 04:26:15,471 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-11 04:26:15,472 main DEBUG createAppenders(={Console}) 2024-12-11 04:26:15,472 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 initialized 2024-12-11 04:26:15,473 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-12-11 04:26:15,473 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 OK. 2024-12-11 04:26:15,473 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-11 04:26:15,474 main DEBUG OutputStream closed 2024-12-11 04:26:15,474 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-11 04:26:15,474 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-11 04:26:15,474 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@54e1c68b OK 2024-12-11 04:26:15,544 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-11 04:26:15,547 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-11 04:26:15,548 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-11 04:26:15,549 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-11 04:26:15,549 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-11 04:26:15,549 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-11 04:26:15,550 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-11 04:26:15,550 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-11 04:26:15,550 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-11 04:26:15,550 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-11 04:26:15,551 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-11 04:26:15,551 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-11 04:26:15,551 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-11 04:26:15,552 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-11 04:26:15,552 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-11 04:26:15,552 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-11 04:26:15,553 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-11 04:26:15,553 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-11 04:26:15,556 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-11 04:26:15,556 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7dda48d9) with optional ClassLoader: null 2024-12-11 04:26:15,556 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-11 04:26:15,557 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7dda48d9] started OK. 2024-12-11T04:26:15,813 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6d89658-89f2-0e5c-6e9b-90c5c540f28a 2024-12-11 04:26:15,816 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-11 04:26:15,816 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
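The lines above show Log4j2 building the test logging configuration from the log4j2.properties inside the hbase-logging tests jar: per-package LoggerConfigs (for example DEBUG for org.apache.hadoop.hbase, ERROR for org.apache.zookeeper) and a single Console appender writing to SYSTEM_ERR. As a purely illustrative aid (not part of the test run), the following minimal Java sketch probes those effective levels through the SLF4J facade that HBase code logs against; the class name is hypothetical.

// Minimal sketch, assuming the configuration logged above is active: probe the
// effective levels via SLF4J. Useful when checking why a flaky-test run did or
// did not emit DEBUG output for a given package.
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LogLevelProbe {
  public static void main(String[] args) {
    Logger hbase = LoggerFactory.getLogger("org.apache.hadoop.hbase");
    Logger zk = LoggerFactory.getLogger("org.apache.zookeeper");

    // Expected from the configuration above: DEBUG enabled for org.apache.hadoop.hbase,
    // WARN disabled (i.e. ERROR-only) for org.apache.zookeeper.
    System.out.println("hbase DEBUG enabled:     " + hbase.isDebugEnabled());
    System.out.println("zookeeper WARN disabled: " + !zk.isWarnEnabled());
  }
}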
2024-12-11T04:26:15,825 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithAdaptivePolicy timeout: 13 mins 2024-12-11T04:26:15,845 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-11T04:26:15,848 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6d89658-89f2-0e5c-6e9b-90c5c540f28a/cluster_a7ae6713-c4ce-cfe9-805e-0174746b49b0, deleteOnExit=true 2024-12-11T04:26:15,849 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-11T04:26:15,849 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6d89658-89f2-0e5c-6e9b-90c5c540f28a/test.cache.data in system properties and HBase conf 2024-12-11T04:26:15,850 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6d89658-89f2-0e5c-6e9b-90c5c540f28a/hadoop.tmp.dir in system properties and HBase conf 2024-12-11T04:26:15,850 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6d89658-89f2-0e5c-6e9b-90c5c540f28a/hadoop.log.dir in system properties and HBase conf 2024-12-11T04:26:15,851 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6d89658-89f2-0e5c-6e9b-90c5c540f28a/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-11T04:26:15,851 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6d89658-89f2-0e5c-6e9b-90c5c540f28a/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-11T04:26:15,852 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-11T04:26:15,949 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-11T04:26:16,042 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-11T04:26:16,046 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6d89658-89f2-0e5c-6e9b-90c5c540f28a/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-11T04:26:16,046 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6d89658-89f2-0e5c-6e9b-90c5c540f28a/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-11T04:26:16,047 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6d89658-89f2-0e5c-6e9b-90c5c540f28a/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-11T04:26:16,047 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6d89658-89f2-0e5c-6e9b-90c5c540f28a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-11T04:26:16,047 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6d89658-89f2-0e5c-6e9b-90c5c540f28a/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-11T04:26:16,048 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6d89658-89f2-0e5c-6e9b-90c5c540f28a/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-11T04:26:16,048 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6d89658-89f2-0e5c-6e9b-90c5c540f28a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-11T04:26:16,049 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6d89658-89f2-0e5c-6e9b-90c5c540f28a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-11T04:26:16,049 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6d89658-89f2-0e5c-6e9b-90c5c540f28a/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-11T04:26:16,049 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6d89658-89f2-0e5c-6e9b-90c5c540f28a/nfs.dump.dir in system properties and HBase conf 2024-12-11T04:26:16,050 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6d89658-89f2-0e5c-6e9b-90c5c540f28a/java.io.tmpdir in system properties and HBase conf 2024-12-11T04:26:16,050 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6d89658-89f2-0e5c-6e9b-90c5c540f28a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-11T04:26:16,051 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6d89658-89f2-0e5c-6e9b-90c5c540f28a/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-11T04:26:16,051 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6d89658-89f2-0e5c-6e9b-90c5c540f28a/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-11T04:26:16,906 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-11T04:26:16,980 INFO [Time-limited test {}] log.Log(170): Logging initialized @2266ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-11T04:26:17,052 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T04:26:17,113 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T04:26:17,133 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T04:26:17,134 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T04:26:17,135 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-11T04:26:17,147 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T04:26:17,150 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6d89658-89f2-0e5c-6e9b-90c5c540f28a/hadoop.log.dir/,AVAILABLE} 2024-12-11T04:26:17,151 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-11T04:26:17,351 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@b03fcff{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6d89658-89f2-0e5c-6e9b-90c5c540f28a/java.io.tmpdir/jetty-localhost-40399-hadoop-hdfs-3_4_1-tests_jar-_-any-5823184675677878204/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-11T04:26:17,357 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:40399} 2024-12-11T04:26:17,358 INFO [Time-limited test {}] server.Server(415): Started @2645ms 2024-12-11T04:26:17,754 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T04:26:17,761 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T04:26:17,762 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T04:26:17,762 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T04:26:17,762 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-11T04:26:17,763 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2ca71a25{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6d89658-89f2-0e5c-6e9b-90c5c540f28a/hadoop.log.dir/,AVAILABLE} 2024-12-11T04:26:17,764 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47db50b9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-11T04:26:17,884 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3054265c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6d89658-89f2-0e5c-6e9b-90c5c540f28a/java.io.tmpdir/jetty-localhost-43899-hadoop-hdfs-3_4_1-tests_jar-_-any-4327715829335421890/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T04:26:17,885 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@65902fec{HTTP/1.1, (http/1.1)}{localhost:43899} 2024-12-11T04:26:17,886 INFO [Time-limited test {}] server.Server(415): Started @3173ms 2024-12-11T04:26:17,946 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-11T04:26:18,467 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6d89658-89f2-0e5c-6e9b-90c5c540f28a/cluster_a7ae6713-c4ce-cfe9-805e-0174746b49b0/dfs/data/data1/current/BP-232036649-172.17.0.2-1733891176636/current, will proceed with Du for space computation calculation, 2024-12-11T04:26:18,467 WARN [Thread-73 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6d89658-89f2-0e5c-6e9b-90c5c540f28a/cluster_a7ae6713-c4ce-cfe9-805e-0174746b49b0/dfs/data/data2/current/BP-232036649-172.17.0.2-1733891176636/current, will proceed with Du for space computation calculation, 2024-12-11T04:26:18,505 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-11T04:26:18,564 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x67e029bf9ce0e445 with lease ID 0x54bba573f52c36c5: Processing first storage report for DS-d9d18132-7621-466a-8037-49d78af36644 from datanode DatanodeRegistration(127.0.0.1:45413, datanodeUuid=155eeafc-ea12-4b95-970b-e440039dff40, infoPort=40827, infoSecurePort=0, ipcPort=34341, storageInfo=lv=-57;cid=testClusterID;nsid=547780956;c=1733891176636) 2024-12-11T04:26:18,566 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x67e029bf9ce0e445 with lease ID 0x54bba573f52c36c5: from storage DS-d9d18132-7621-466a-8037-49d78af36644 node DatanodeRegistration(127.0.0.1:45413, datanodeUuid=155eeafc-ea12-4b95-970b-e440039dff40, infoPort=40827, infoSecurePort=0, ipcPort=34341, storageInfo=lv=-57;cid=testClusterID;nsid=547780956;c=1733891176636), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-11T04:26:18,566 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x67e029bf9ce0e445 with lease ID 0x54bba573f52c36c5: Processing first storage report for DS-554a8003-39f2-4c6f-811b-97e169a40093 from datanode DatanodeRegistration(127.0.0.1:45413, datanodeUuid=155eeafc-ea12-4b95-970b-e440039dff40, infoPort=40827, infoSecurePort=0, ipcPort=34341, storageInfo=lv=-57;cid=testClusterID;nsid=547780956;c=1733891176636) 2024-12-11T04:26:18,566 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x67e029bf9ce0e445 with lease ID 0x54bba573f52c36c5: from storage DS-554a8003-39f2-4c6f-811b-97e169a40093 node DatanodeRegistration(127.0.0.1:45413, datanodeUuid=155eeafc-ea12-4b95-970b-e440039dff40, infoPort=40827, infoSecurePort=0, ipcPort=34341, storageInfo=lv=-57;cid=testClusterID;nsid=547780956;c=1733891176636), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T04:26:18,621 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6d89658-89f2-0e5c-6e9b-90c5c540f28a 
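The startup sequence above (HBaseClassTestRule applying a 13-minute timeout to org.apache.hadoop.hbase.TestAcidGuaranteesWithAdaptivePolicy, then HBaseTestingUtility starting a minicluster with one master, one region server, one data node and one ZooKeeper server) is what a branch-2 test class typically drives with the code sketched below. This is a hedged, illustrative skeleton: only HBaseClassTestRule, HBaseTestingUtility and StartMiniClusterOption come from the log; the class name and test body are placeholders.

// Illustrative sketch only: roughly how a branch-2 test produces the minicluster
// startup logged above. Real tests add table setup and assertions.
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;

public class TestMiniClusterSketch {
  // Enforces the per-class timeout reported above ("timeout: 13 mins").
  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestMiniClusterSketch.class);

  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  @BeforeClass
  public static void setUp() throws Exception {
    // Mirrors StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=1, numZkServers=1}.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1).numRegionServers(1).numDataNodes(1).numZkServers(1).build();
    TEST_UTIL.startMiniCluster(option); // starts DFS, ZooKeeper and HBase as logged above
  }

  @AfterClass
  public static void tearDown() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  @Test
  public void testSomething() throws Exception {
    // table creation and assertions would go here
  }
}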
2024-12-11T04:26:18,704 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6d89658-89f2-0e5c-6e9b-90c5c540f28a/cluster_a7ae6713-c4ce-cfe9-805e-0174746b49b0/zookeeper_0, clientPort=50078, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6d89658-89f2-0e5c-6e9b-90c5c540f28a/cluster_a7ae6713-c4ce-cfe9-805e-0174746b49b0/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6d89658-89f2-0e5c-6e9b-90c5c540f28a/cluster_a7ae6713-c4ce-cfe9-805e-0174746b49b0/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-11T04:26:18,713 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=50078 2024-12-11T04:26:18,727 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T04:26:18,731 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T04:26:18,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741825_1001 (size=7) 2024-12-11T04:26:19,390 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5 with version=8 2024-12-11T04:26:19,390 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/hbase-staging 2024-12-11T04:26:19,524 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-11T04:26:19,799 INFO [Time-limited test {}] client.ConnectionUtils(129): master/5f466b3719ec:0 server-side Connection retries=45 2024-12-11T04:26:19,818 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T04:26:19,819 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-11T04:26:19,819 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-11T04:26:19,819 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T04:26:19,819 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-11T04:26:19,954 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating 
org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-11T04:26:20,015 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-11T04:26:20,024 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-11T04:26:20,028 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T04:26:20,055 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 12085 (auto-detected) 2024-12-11T04:26:20,057 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-11T04:26:20,076 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:46111 2024-12-11T04:26:20,085 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T04:26:20,087 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T04:26:20,099 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:46111 connecting to ZooKeeper ensemble=127.0.0.1:50078 2024-12-11T04:26:20,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:461110x0, quorum=127.0.0.1:50078, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T04:26:20,135 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46111-0x1007f5379a30000 connected 2024-12-11T04:26:20,170 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46111-0x1007f5379a30000, quorum=127.0.0.1:50078, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-11T04:26:20,174 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46111-0x1007f5379a30000, quorum=127.0.0.1:50078, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T04:26:20,177 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46111-0x1007f5379a30000, quorum=127.0.0.1:50078, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T04:26:20,184 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46111 2024-12-11T04:26:20,185 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46111 2024-12-11T04:26:20,185 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46111 2024-12-11T04:26:20,191 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46111 2024-12-11T04:26:20,191 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46111 
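At this point the master's NettyRpcServer is bound to 172.17.0.2:46111 and registered with the ZooKeeper ensemble at 127.0.0.1:50078 started by the minicluster. The following hedged sketch shows how client code could reach that ensemble through the standard configuration keys; the quorum host and client port are copied from the log, everything else (class name, printed metric) is illustrative, and inside a real test one would normally use the utility's own getConfiguration()/getConnection() instead.

// Illustrative sketch: connecting a client to the mini cluster's ZooKeeper ensemble
// (clientPort=50078 in the log above).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MiniClusterClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "50078"); // clientPort from the log

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      System.out.println("cluster id: " + admin.getClusterMetrics().getClusterId());
    }
  }
}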
2024-12-11T04:26:20,198 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5, hbase.cluster.distributed=false 2024-12-11T04:26:20,268 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/5f466b3719ec:0 server-side Connection retries=45 2024-12-11T04:26:20,268 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T04:26:20,268 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-11T04:26:20,268 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-11T04:26:20,269 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T04:26:20,269 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-11T04:26:20,271 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-11T04:26:20,274 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T04:26:20,275 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:39071 2024-12-11T04:26:20,277 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-11T04:26:20,282 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-11T04:26:20,285 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T04:26:20,289 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T04:26:20,293 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:39071 connecting to ZooKeeper ensemble=127.0.0.1:50078 2024-12-11T04:26:20,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:390710x0, quorum=127.0.0.1:50078, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T04:26:20,297 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39071-0x1007f5379a30001 connected 2024-12-11T04:26:20,298 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39071-0x1007f5379a30001, quorum=127.0.0.1:50078, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-11T04:26:20,299 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39071-0x1007f5379a30001, 
quorum=127.0.0.1:50078, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T04:26:20,300 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39071-0x1007f5379a30001, quorum=127.0.0.1:50078, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T04:26:20,302 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39071 2024-12-11T04:26:20,302 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39071 2024-12-11T04:26:20,303 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39071 2024-12-11T04:26:20,303 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39071 2024-12-11T04:26:20,303 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39071 2024-12-11T04:26:20,306 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/5f466b3719ec,46111,1733891179518 2024-12-11T04:26:20,315 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46111-0x1007f5379a30000, quorum=127.0.0.1:50078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T04:26:20,315 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39071-0x1007f5379a30001, quorum=127.0.0.1:50078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T04:26:20,317 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46111-0x1007f5379a30000, quorum=127.0.0.1:50078, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/5f466b3719ec,46111,1733891179518 2024-12-11T04:26:20,321 DEBUG [M:0;5f466b3719ec:46111 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;5f466b3719ec:46111 2024-12-11T04:26:20,339 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46111-0x1007f5379a30000, quorum=127.0.0.1:50078, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-11T04:26:20,339 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39071-0x1007f5379a30001, quorum=127.0.0.1:50078, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-11T04:26:20,339 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46111-0x1007f5379a30000, quorum=127.0.0.1:50078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T04:26:20,339 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39071-0x1007f5379a30001, quorum=127.0.0.1:50078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T04:26:20,340 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46111-0x1007f5379a30000, quorum=127.0.0.1:50078, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-11T04:26:20,342 INFO 
[master/5f466b3719ec:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/5f466b3719ec,46111,1733891179518 from backup master directory 2024-12-11T04:26:20,343 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:46111-0x1007f5379a30000, quorum=127.0.0.1:50078, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-11T04:26:20,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46111-0x1007f5379a30000, quorum=127.0.0.1:50078, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/5f466b3719ec,46111,1733891179518 2024-12-11T04:26:20,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39071-0x1007f5379a30001, quorum=127.0.0.1:50078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T04:26:20,346 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46111-0x1007f5379a30000, quorum=127.0.0.1:50078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T04:26:20,347 WARN [master/5f466b3719ec:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-11T04:26:20,347 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=5f466b3719ec,46111,1733891179518 2024-12-11T04:26:20,349 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-11T04:26:20,351 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-11T04:26:20,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741826_1002 (size=42) 2024-12-11T04:26:20,827 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/hbase.id with ID: 591064a7-9ecf-4824-bdad-c23b7499c3c6 2024-12-11T04:26:20,868 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T04:26:20,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46111-0x1007f5379a30000, quorum=127.0.0.1:50078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T04:26:20,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39071-0x1007f5379a30001, quorum=127.0.0.1:50078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T04:26:20,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741827_1003 (size=196) 2024-12-11T04:26:21,329 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => 
{'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T04:26:21,332 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-11T04:26:21,349 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:232) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:207) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?] 
at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:21,353 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-11T04:26:21,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741828_1004 (size=1189) 2024-12-11T04:26:21,800 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/MasterData/data/master/store 2024-12-11T04:26:21,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741829_1005 (size=34) 2024-12-11T04:26:21,819 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
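The descriptor logged above for the master local region 'master:store' lists four column families, with 'info' carrying non-default attributes (VERSIONS => '3', IN_MEMORY => 'true', BLOCKSIZE => '8192 B (8KB)', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', BLOOMFILTER => 'ROWCOL'). The sketch below expresses those same attributes with the public descriptor builder API purely for illustration; it is not the internal MasterRegionFactory code path that actually built this descriptor.

// Illustrative sketch: the 'info' family attributes printed above, spelled out with
// the public builder API. 'proc', 'rs' and 'state' use the defaults shown in the log.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)                                    // VERSIONS => '3'
        .setInMemory(true)                                    // IN_MEMORY => 'true'
        .setBlocksize(8192)                                   // BLOCKSIZE => '8192 B (8KB)'
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
        .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
        .build();

    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(info)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
        .build();
  }
}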
2024-12-11T04:26:21,820 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T04:26:21,821 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-11T04:26:21,821 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T04:26:21,821 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T04:26:21,821 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-11T04:26:21,821 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T04:26:21,822 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T04:26:21,822 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-11T04:26:21,823 WARN [master/5f466b3719ec:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/MasterData/data/master/store/.initializing 2024-12-11T04:26:21,824 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/MasterData/WALs/5f466b3719ec,46111,1733891179518 2024-12-11T04:26:21,830 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-11T04:26:21,840 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5f466b3719ec%2C46111%2C1733891179518, suffix=, logDir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/MasterData/WALs/5f466b3719ec,46111,1733891179518, archiveDir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/MasterData/oldWALs, maxLogs=10 2024-12-11T04:26:21,861 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/MasterData/WALs/5f466b3719ec,46111,1733891179518/5f466b3719ec%2C46111%2C1733891179518.1733891181844, exclude list is [], retry=0 2024-12-11T04:26:21,877 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45413,DS-d9d18132-7621-466a-8037-49d78af36644,DISK] 2024-12-11T04:26:21,880 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
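The AbstractFSWAL line above reports the WAL settings the master region ended up with: blocksize=256 MB, rollsize=128 MB (blocksize times the roll multiplier), maxLogs=10, and separate WALs/oldWALs directories. As a hedged illustration, the sketch below shows the general region-server WAL keys that normally correspond to those numbers; this is an assumption about the mapping, since the MasterData WAL may read its own dedicated keys, and the values are only echoed from the log.

// Illustrative sketch: general WAL tuning keys behind the numbers logged above.
// Assumption: shown only to explain blocksize/rollsize/maxLogs, not claimed to be
// the exact keys the master local region consults.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalConfigSketch {
  public static Configuration walTuning() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // blocksize=256 MB
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // rollsize = 128 MB
    conf.setInt("hbase.regionserver.maxlogs", 10);                         // maxLogs=10
    return conf;
  }
}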
2024-12-11T04:26:21,915 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/MasterData/WALs/5f466b3719ec,46111,1733891179518/5f466b3719ec%2C46111%2C1733891179518.1733891181844 2024-12-11T04:26:21,916 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40827:40827)] 2024-12-11T04:26:21,916 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-11T04:26:21,917 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T04:26:21,920 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T04:26:21,921 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T04:26:21,958 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T04:26:21,981 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-11T04:26:21,985 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:21,987 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T04:26:21,987 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T04:26:21,991 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-11T04:26:21,991 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:21,992 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T04:26:21,992 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T04:26:21,995 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-11T04:26:21,995 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:21,996 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T04:26:21,996 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T04:26:21,998 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-11T04:26:21,998 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:21,999 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T04:26:22,003 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-11T04:26:22,004 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-11T04:26:22,012 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-11T04:26:22,016 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T04:26:22,020 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T04:26:22,022 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64526651, jitterRate=-0.03847797214984894}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-11T04:26:22,025 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-11T04:26:22,026 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-11T04:26:22,055 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f89b737, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:26:22,090 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 
2024-12-11T04:26:22,102 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-11T04:26:22,102 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-11T04:26:22,104 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-11T04:26:22,106 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-12-11T04:26:22,111 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 4 msec 2024-12-11T04:26:22,111 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-11T04:26:22,135 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-11T04:26:22,147 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46111-0x1007f5379a30000, quorum=127.0.0.1:50078, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-11T04:26:22,150 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-11T04:26:22,152 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-11T04:26:22,153 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46111-0x1007f5379a30000, quorum=127.0.0.1:50078, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-11T04:26:22,155 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-11T04:26:22,158 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-11T04:26:22,161 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46111-0x1007f5379a30000, quorum=127.0.0.1:50078, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-11T04:26:22,163 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-11T04:26:22,164 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46111-0x1007f5379a30000, quorum=127.0.0.1:50078, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-11T04:26:22,166 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-11T04:26:22,175 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46111-0x1007f5379a30000, quorum=127.0.0.1:50078, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-11T04:26:22,178 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-11T04:26:22,182 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46111-0x1007f5379a30000, quorum=127.0.0.1:50078, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T04:26:22,182 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39071-0x1007f5379a30001, quorum=127.0.0.1:50078, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T04:26:22,182 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39071-0x1007f5379a30001, quorum=127.0.0.1:50078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T04:26:22,182 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46111-0x1007f5379a30000, quorum=127.0.0.1:50078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T04:26:22,183 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=5f466b3719ec,46111,1733891179518, sessionid=0x1007f5379a30000, setting cluster-up flag (Was=false) 2024-12-11T04:26:22,196 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46111-0x1007f5379a30000, quorum=127.0.0.1:50078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T04:26:22,196 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39071-0x1007f5379a30001, quorum=127.0.0.1:50078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T04:26:22,205 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-11T04:26:22,207 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5f466b3719ec,46111,1733891179518 2024-12-11T04:26:22,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46111-0x1007f5379a30000, quorum=127.0.0.1:50078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T04:26:22,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39071-0x1007f5379a30001, quorum=127.0.0.1:50078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T04:26:22,219 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-11T04:26:22,220 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=5f466b3719ec,46111,1733891179518 2024-12-11T04:26:22,305 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure 
table=hbase:meta 2024-12-11T04:26:22,312 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-11T04:26:22,314 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-11T04:26:22,321 DEBUG [RS:0;5f466b3719ec:39071 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;5f466b3719ec:39071 2024-12-11T04:26:22,320 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 5f466b3719ec,46111,1733891179518 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-11T04:26:22,323 INFO [RS:0;5f466b3719ec:39071 {}] regionserver.HRegionServer(1008): ClusterId : 591064a7-9ecf-4824-bdad-c23b7499c3c6 2024-12-11T04:26:22,323 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/5f466b3719ec:0, corePoolSize=5, maxPoolSize=5 2024-12-11T04:26:22,323 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/5f466b3719ec:0, corePoolSize=5, maxPoolSize=5 2024-12-11T04:26:22,324 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/5f466b3719ec:0, corePoolSize=5, maxPoolSize=5 2024-12-11T04:26:22,324 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/5f466b3719ec:0, corePoolSize=5, maxPoolSize=5 2024-12-11T04:26:22,324 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/5f466b3719ec:0, corePoolSize=10, maxPoolSize=10 2024-12-11T04:26:22,324 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/5f466b3719ec:0, corePoolSize=1, maxPoolSize=1 2024-12-11T04:26:22,325 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/5f466b3719ec:0, corePoolSize=2, maxPoolSize=2 2024-12-11T04:26:22,325 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/5f466b3719ec:0, corePoolSize=1, maxPoolSize=1 2024-12-11T04:26:22,325 DEBUG [RS:0;5f466b3719ec:39071 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-11T04:26:22,326 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; 
timeout=30000, timestamp=1733891212326 2024-12-11T04:26:22,328 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-11T04:26:22,329 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-11T04:26:22,330 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-11T04:26:22,330 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-11T04:26:22,331 DEBUG [RS:0;5f466b3719ec:39071 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-11T04:26:22,331 DEBUG [RS:0;5f466b3719ec:39071 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-11T04:26:22,333 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-11T04:26:22,333 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-11T04:26:22,334 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-11T04:26:22,334 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-11T04:26:22,334 DEBUG [RS:0;5f466b3719ec:39071 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-11T04:26:22,335 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-11T04:26:22,335 DEBUG [RS:0;5f466b3719ec:39071 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@204c8d4b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:26:22,335 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:22,336 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-11T04:26:22,336 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-11T04:26:22,337 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-11T04:26:22,337 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-11T04:26:22,340 DEBUG [RS:0;5f466b3719ec:39071 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27d91033, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5f466b3719ec/172.17.0.2:0 2024-12-11T04:26:22,341 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-11T04:26:22,341 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-11T04:26:22,344 INFO [RS:0;5f466b3719ec:39071 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-11T04:26:22,344 INFO [RS:0;5f466b3719ec:39071 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-11T04:26:22,344 DEBUG [RS:0;5f466b3719ec:39071 {}] regionserver.HRegionServer(1090): About to register with Master. 
2024-12-11T04:26:22,345 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/5f466b3719ec:0:becomeActiveMaster-HFileCleaner.large.0-1733891182343,5,FailOnTimeoutGroup] 2024-12-11T04:26:22,346 INFO [RS:0;5f466b3719ec:39071 {}] regionserver.HRegionServer(3073): reportForDuty to master=5f466b3719ec,46111,1733891179518 with isa=5f466b3719ec/172.17.0.2:39071, startcode=1733891180267 2024-12-11T04:26:22,346 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/5f466b3719ec:0:becomeActiveMaster-HFileCleaner.small.0-1733891182345,5,FailOnTimeoutGroup] 2024-12-11T04:26:22,346 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-11T04:26:22,347 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-11T04:26:22,348 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-11T04:26:22,348 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-11T04:26:22,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741831_1007 (size=1039) 2024-12-11T04:26:22,359 DEBUG [RS:0;5f466b3719ec:39071 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-11T04:26:22,392 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37479, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-11T04:26:22,397 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46111 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:22,399 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46111 {}] master.ServerManager(486): Registering regionserver=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:22,413 DEBUG [RS:0;5f466b3719ec:39071 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5 2024-12-11T04:26:22,413 DEBUG [RS:0;5f466b3719ec:39071 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:43317 2024-12-11T04:26:22,413 DEBUG [RS:0;5f466b3719ec:39071 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-11T04:26:22,417 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46111-0x1007f5379a30000, quorum=127.0.0.1:50078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T04:26:22,418 DEBUG [RS:0;5f466b3719ec:39071 {}] zookeeper.ZKUtil(111): regionserver:39071-0x1007f5379a30001, quorum=127.0.0.1:50078, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/5f466b3719ec,39071,1733891180267 2024-12-11T04:26:22,418 WARN [RS:0;5f466b3719ec:39071 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE 
not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-11T04:26:22,418 INFO [RS:0;5f466b3719ec:39071 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-11T04:26:22,419 DEBUG [RS:0;5f466b3719ec:39071 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/WALs/5f466b3719ec,39071,1733891180267 2024-12-11T04:26:22,420 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [5f466b3719ec,39071,1733891180267] 2024-12-11T04:26:22,431 DEBUG [RS:0;5f466b3719ec:39071 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-11T04:26:22,442 INFO [RS:0;5f466b3719ec:39071 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-11T04:26:22,453 INFO [RS:0;5f466b3719ec:39071 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-11T04:26:22,456 INFO [RS:0;5f466b3719ec:39071 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-11T04:26:22,456 INFO [RS:0;5f466b3719ec:39071 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T04:26:22,457 INFO [RS:0;5f466b3719ec:39071 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-11T04:26:22,464 INFO [RS:0;5f466b3719ec:39071 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-11T04:26:22,464 DEBUG [RS:0;5f466b3719ec:39071 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/5f466b3719ec:0, corePoolSize=1, maxPoolSize=1 2024-12-11T04:26:22,464 DEBUG [RS:0;5f466b3719ec:39071 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/5f466b3719ec:0, corePoolSize=1, maxPoolSize=1 2024-12-11T04:26:22,464 DEBUG [RS:0;5f466b3719ec:39071 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/5f466b3719ec:0, corePoolSize=1, maxPoolSize=1 2024-12-11T04:26:22,465 DEBUG [RS:0;5f466b3719ec:39071 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/5f466b3719ec:0, corePoolSize=1, maxPoolSize=1 2024-12-11T04:26:22,465 DEBUG [RS:0;5f466b3719ec:39071 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/5f466b3719ec:0, corePoolSize=1, maxPoolSize=1 2024-12-11T04:26:22,465 DEBUG [RS:0;5f466b3719ec:39071 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/5f466b3719ec:0, corePoolSize=2, maxPoolSize=2 2024-12-11T04:26:22,465 DEBUG [RS:0;5f466b3719ec:39071 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/5f466b3719ec:0, corePoolSize=1, maxPoolSize=1 2024-12-11T04:26:22,465 DEBUG [RS:0;5f466b3719ec:39071 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/5f466b3719ec:0, corePoolSize=1, maxPoolSize=1 2024-12-11T04:26:22,465 DEBUG [RS:0;5f466b3719ec:39071 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/5f466b3719ec:0, corePoolSize=1, maxPoolSize=1 2024-12-11T04:26:22,465 DEBUG [RS:0;5f466b3719ec:39071 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/5f466b3719ec:0, corePoolSize=1, maxPoolSize=1 2024-12-11T04:26:22,466 DEBUG [RS:0;5f466b3719ec:39071 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/5f466b3719ec:0, corePoolSize=1, maxPoolSize=1 2024-12-11T04:26:22,466 DEBUG [RS:0;5f466b3719ec:39071 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/5f466b3719ec:0, corePoolSize=3, maxPoolSize=3 2024-12-11T04:26:22,466 DEBUG [RS:0;5f466b3719ec:39071 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0, corePoolSize=3, maxPoolSize=3 2024-12-11T04:26:22,468 INFO [RS:0;5f466b3719ec:39071 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T04:26:22,468 INFO [RS:0;5f466b3719ec:39071 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T04:26:22,468 INFO [RS:0;5f466b3719ec:39071 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-11T04:26:22,468 INFO [RS:0;5f466b3719ec:39071 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-11T04:26:22,469 INFO [RS:0;5f466b3719ec:39071 {}] hbase.ChoreService(168): Chore ScheduledChore name=5f466b3719ec,39071,1733891180267-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-11T04:26:22,489 INFO [RS:0;5f466b3719ec:39071 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-11T04:26:22,491 INFO [RS:0;5f466b3719ec:39071 {}] hbase.ChoreService(168): Chore ScheduledChore name=5f466b3719ec,39071,1733891180267-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T04:26:22,510 INFO [RS:0;5f466b3719ec:39071 {}] regionserver.Replication(204): 5f466b3719ec,39071,1733891180267 started 2024-12-11T04:26:22,510 INFO [RS:0;5f466b3719ec:39071 {}] regionserver.HRegionServer(1767): Serving as 5f466b3719ec,39071,1733891180267, RpcServer on 5f466b3719ec/172.17.0.2:39071, sessionid=0x1007f5379a30001 2024-12-11T04:26:22,511 DEBUG [RS:0;5f466b3719ec:39071 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-11T04:26:22,511 DEBUG [RS:0;5f466b3719ec:39071 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:22,511 DEBUG [RS:0;5f466b3719ec:39071 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5f466b3719ec,39071,1733891180267' 2024-12-11T04:26:22,511 DEBUG [RS:0;5f466b3719ec:39071 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-11T04:26:22,512 DEBUG [RS:0;5f466b3719ec:39071 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-11T04:26:22,512 DEBUG [RS:0;5f466b3719ec:39071 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-11T04:26:22,513 DEBUG [RS:0;5f466b3719ec:39071 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-11T04:26:22,513 DEBUG [RS:0;5f466b3719ec:39071 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:22,513 DEBUG [RS:0;5f466b3719ec:39071 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '5f466b3719ec,39071,1733891180267' 2024-12-11T04:26:22,513 DEBUG [RS:0;5f466b3719ec:39071 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-11T04:26:22,513 DEBUG [RS:0;5f466b3719ec:39071 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-11T04:26:22,514 DEBUG [RS:0;5f466b3719ec:39071 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-11T04:26:22,514 INFO [RS:0;5f466b3719ec:39071 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-11T04:26:22,514 INFO [RS:0;5f466b3719ec:39071 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-11T04:26:22,619 INFO [RS:0;5f466b3719ec:39071 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-11T04:26:22,623 INFO [RS:0;5f466b3719ec:39071 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5f466b3719ec%2C39071%2C1733891180267, suffix=, logDir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/WALs/5f466b3719ec,39071,1733891180267, archiveDir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/oldWALs, maxLogs=32 2024-12-11T04:26:22,639 DEBUG [RS:0;5f466b3719ec:39071 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/WALs/5f466b3719ec,39071,1733891180267/5f466b3719ec%2C39071%2C1733891180267.1733891182625, exclude list is [], retry=0 2024-12-11T04:26:22,644 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45413,DS-d9d18132-7621-466a-8037-49d78af36644,DISK] 2024-12-11T04:26:22,648 INFO [RS:0;5f466b3719ec:39071 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/WALs/5f466b3719ec,39071,1733891180267/5f466b3719ec%2C39071%2C1733891180267.1733891182625 2024-12-11T04:26:22,649 DEBUG [RS:0;5f466b3719ec:39071 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40827:40827)] 2024-12-11T04:26:22,753 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-11T04:26:22,753 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5 2024-12-11T04:26:22,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741833_1009 (size=32) 2024-12-11T04:26:23,164 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): 
Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T04:26:23,167 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-11T04:26:23,169 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-11T04:26:23,170 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:23,171 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T04:26:23,171 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-11T04:26:23,173 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-11T04:26:23,173 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:23,174 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T04:26:23,175 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family table of region 1588230740 2024-12-11T04:26:23,177 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-11T04:26:23,177 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:23,178 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T04:26:23,180 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/hbase/meta/1588230740 2024-12-11T04:26:23,181 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/hbase/meta/1588230740 2024-12-11T04:26:23,184 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-11T04:26:23,186 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-11T04:26:23,191 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T04:26:23,191 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69581571, jitterRate=0.03684620559215546}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-11T04:26:23,194 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-11T04:26:23,194 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-11T04:26:23,195 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-11T04:26:23,195 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-11T04:26:23,195 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-11T04:26:23,195 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-11T04:26:23,196 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-11T04:26:23,196 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-11T04:26:23,199 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-11T04:26:23,199 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-11T04:26:23,206 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-11T04:26:23,216 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-11T04:26:23,218 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-11T04:26:23,370 DEBUG [5f466b3719ec:46111 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-11T04:26:23,374 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:23,379 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5f466b3719ec,39071,1733891180267, state=OPENING 2024-12-11T04:26:23,385 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-11T04:26:23,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46111-0x1007f5379a30000, quorum=127.0.0.1:50078, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T04:26:23,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39071-0x1007f5379a30001, quorum=127.0.0.1:50078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T04:26:23,388 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T04:26:23,388 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T04:26:23,390 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=5f466b3719ec,39071,1733891180267}] 2024-12-11T04:26:23,564 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:23,566 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-11T04:26:23,569 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39054, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T04:26:23,580 INFO [RS_OPEN_META-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-11T04:26:23,580 INFO [RS_OPEN_META-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-11T04:26:23,581 INFO [RS_OPEN_META-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-11T04:26:23,584 INFO [RS_OPEN_META-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=5f466b3719ec%2C39071%2C1733891180267.meta, suffix=.meta, logDir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/WALs/5f466b3719ec,39071,1733891180267, archiveDir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/oldWALs, maxLogs=32 2024-12-11T04:26:23,601 DEBUG [RS_OPEN_META-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/WALs/5f466b3719ec,39071,1733891180267/5f466b3719ec%2C39071%2C1733891180267.meta.1733891183586.meta, exclude list is [], retry=0 2024-12-11T04:26:23,604 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45413,DS-d9d18132-7621-466a-8037-49d78af36644,DISK] 2024-12-11T04:26:23,607 INFO [RS_OPEN_META-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/WALs/5f466b3719ec,39071,1733891180267/5f466b3719ec%2C39071%2C1733891180267.meta.1733891183586.meta 2024-12-11T04:26:23,608 DEBUG [RS_OPEN_META-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer 
with pipeline: [(127.0.0.1/127.0.0.1:40827:40827)] 2024-12-11T04:26:23,608 DEBUG [RS_OPEN_META-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-11T04:26:23,610 DEBUG [RS_OPEN_META-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-11T04:26:23,671 DEBUG [RS_OPEN_META-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-11T04:26:23,677 INFO [RS_OPEN_META-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-11T04:26:23,683 DEBUG [RS_OPEN_META-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-11T04:26:23,683 DEBUG [RS_OPEN_META-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T04:26:23,683 DEBUG [RS_OPEN_META-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-11T04:26:23,684 DEBUG [RS_OPEN_META-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-11T04:26:23,688 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-11T04:26:23,689 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-11T04:26:23,689 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:23,690 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T04:26:23,691 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-11T04:26:23,692 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-11T04:26:23,693 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:23,693 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T04:26:23,694 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-11T04:26:23,695 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-11T04:26:23,695 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:23,696 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T04:26:23,698 DEBUG [RS_OPEN_META-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/hbase/meta/1588230740 2024-12-11T04:26:23,700 DEBUG [RS_OPEN_META-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/hbase/meta/1588230740 2024-12-11T04:26:23,703 DEBUG [RS_OPEN_META-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-11T04:26:23,706 DEBUG [RS_OPEN_META-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-11T04:26:23,708 INFO [RS_OPEN_META-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69400666, jitterRate=0.034150511026382446}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-11T04:26:23,710 DEBUG [RS_OPEN_META-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-11T04:26:23,718 INFO [RS_OPEN_META-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733891183558 2024-12-11T04:26:23,730 DEBUG [RS_OPEN_META-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-11T04:26:23,730 INFO [RS_OPEN_META-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-11T04:26:23,731 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:23,733 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 5f466b3719ec,39071,1733891180267, state=OPEN 2024-12-11T04:26:23,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39071-0x1007f5379a30001, quorum=127.0.0.1:50078, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T04:26:23,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46111-0x1007f5379a30000, quorum=127.0.0.1:50078, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T04:26:23,739 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T04:26:23,739 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T04:26:23,743 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-11T04:26:23,743 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=5f466b3719ec,39071,1733891180267 in 350 msec 2024-12-11T04:26:23,749 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-11T04:26:23,749 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; 
TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 538 msec 2024-12-11T04:26:23,754 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.4990 sec 2024-12-11T04:26:23,754 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733891183754, completionTime=-1 2024-12-11T04:26:23,755 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-11T04:26:23,755 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-11T04:26:23,792 DEBUG [hconnection-0x5132e113-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:26:23,795 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39064, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:26:23,808 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-11T04:26:23,808 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733891243808 2024-12-11T04:26:23,808 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733891303808 2024-12-11T04:26:23,808 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 52 msec 2024-12-11T04:26:23,828 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5f466b3719ec,46111,1733891179518-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T04:26:23,829 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5f466b3719ec,46111,1733891179518-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T04:26:23,829 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5f466b3719ec,46111,1733891179518-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T04:26:23,830 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-5f466b3719ec:46111, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T04:26:23,830 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-11T04:26:23,835 DEBUG [master/5f466b3719ec:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-11T04:26:23,838 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-12-11T04:26:23,839 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-11T04:26:23,845 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-11T04:26:23,848 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T04:26:23,849 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:23,851 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T04:26:23,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741835_1011 (size=358) 2024-12-11T04:26:24,265 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 59414c8d1d0a5ff3a2f59cbb4ca8825b, NAME => 'hbase:namespace,,1733891183839.59414c8d1d0a5ff3a2f59cbb4ca8825b.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5 2024-12-11T04:26:24,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741836_1012 (size=42) 2024-12-11T04:26:24,676 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733891183839.59414c8d1d0a5ff3a2f59cbb4ca8825b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T04:26:24,676 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 59414c8d1d0a5ff3a2f59cbb4ca8825b, disabling compactions & flushes 2024-12-11T04:26:24,676 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733891183839.59414c8d1d0a5ff3a2f59cbb4ca8825b. 2024-12-11T04:26:24,676 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733891183839.59414c8d1d0a5ff3a2f59cbb4ca8825b. 2024-12-11T04:26:24,676 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733891183839.59414c8d1d0a5ff3a2f59cbb4ca8825b. 
after waiting 0 ms 2024-12-11T04:26:24,676 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733891183839.59414c8d1d0a5ff3a2f59cbb4ca8825b. 2024-12-11T04:26:24,677 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733891183839.59414c8d1d0a5ff3a2f59cbb4ca8825b. 2024-12-11T04:26:24,677 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 59414c8d1d0a5ff3a2f59cbb4ca8825b: 2024-12-11T04:26:24,679 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T04:26:24,685 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733891183839.59414c8d1d0a5ff3a2f59cbb4ca8825b.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733891184680"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733891184680"}]},"ts":"1733891184680"} 2024-12-11T04:26:24,710 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-11T04:26:24,712 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T04:26:24,714 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733891184712"}]},"ts":"1733891184712"} 2024-12-11T04:26:24,719 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-11T04:26:24,725 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=59414c8d1d0a5ff3a2f59cbb4ca8825b, ASSIGN}] 2024-12-11T04:26:24,728 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=59414c8d1d0a5ff3a2f59cbb4ca8825b, ASSIGN 2024-12-11T04:26:24,729 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=59414c8d1d0a5ff3a2f59cbb4ca8825b, ASSIGN; state=OFFLINE, location=5f466b3719ec,39071,1733891180267; forceNewPlan=false, retain=false 2024-12-11T04:26:24,882 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=59414c8d1d0a5ff3a2f59cbb4ca8825b, regionState=OPENING, regionLocation=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:24,887 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 59414c8d1d0a5ff3a2f59cbb4ca8825b, server=5f466b3719ec,39071,1733891180267}] 2024-12-11T04:26:25,040 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:25,046 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open 
hbase:namespace,,1733891183839.59414c8d1d0a5ff3a2f59cbb4ca8825b. 2024-12-11T04:26:25,047 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 59414c8d1d0a5ff3a2f59cbb4ca8825b, NAME => 'hbase:namespace,,1733891183839.59414c8d1d0a5ff3a2f59cbb4ca8825b.', STARTKEY => '', ENDKEY => ''} 2024-12-11T04:26:25,047 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 59414c8d1d0a5ff3a2f59cbb4ca8825b 2024-12-11T04:26:25,047 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733891183839.59414c8d1d0a5ff3a2f59cbb4ca8825b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T04:26:25,048 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 59414c8d1d0a5ff3a2f59cbb4ca8825b 2024-12-11T04:26:25,048 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 59414c8d1d0a5ff3a2f59cbb4ca8825b 2024-12-11T04:26:25,050 INFO [StoreOpener-59414c8d1d0a5ff3a2f59cbb4ca8825b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 59414c8d1d0a5ff3a2f59cbb4ca8825b 2024-12-11T04:26:25,052 INFO [StoreOpener-59414c8d1d0a5ff3a2f59cbb4ca8825b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 59414c8d1d0a5ff3a2f59cbb4ca8825b columnFamilyName info 2024-12-11T04:26:25,052 DEBUG [StoreOpener-59414c8d1d0a5ff3a2f59cbb4ca8825b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:25,053 INFO [StoreOpener-59414c8d1d0a5ff3a2f59cbb4ca8825b-1 {}] regionserver.HStore(327): Store=59414c8d1d0a5ff3a2f59cbb4ca8825b/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T04:26:25,055 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/hbase/namespace/59414c8d1d0a5ff3a2f59cbb4ca8825b 2024-12-11T04:26:25,055 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/hbase/namespace/59414c8d1d0a5ff3a2f59cbb4ca8825b 2024-12-11T04:26:25,059 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 59414c8d1d0a5ff3a2f59cbb4ca8825b 2024-12-11T04:26:25,062 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/hbase/namespace/59414c8d1d0a5ff3a2f59cbb4ca8825b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T04:26:25,063 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 59414c8d1d0a5ff3a2f59cbb4ca8825b; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60332229, jitterRate=-0.10097973048686981}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T04:26:25,064 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 59414c8d1d0a5ff3a2f59cbb4ca8825b: 2024-12-11T04:26:25,067 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733891183839.59414c8d1d0a5ff3a2f59cbb4ca8825b., pid=6, masterSystemTime=1733891185040 2024-12-11T04:26:25,070 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733891183839.59414c8d1d0a5ff3a2f59cbb4ca8825b. 2024-12-11T04:26:25,070 INFO [RS_OPEN_PRIORITY_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733891183839.59414c8d1d0a5ff3a2f59cbb4ca8825b. 
2024-12-11T04:26:25,071 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=59414c8d1d0a5ff3a2f59cbb4ca8825b, regionState=OPEN, openSeqNum=2, regionLocation=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:25,078 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-11T04:26:25,079 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 59414c8d1d0a5ff3a2f59cbb4ca8825b, server=5f466b3719ec,39071,1733891180267 in 187 msec 2024-12-11T04:26:25,081 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-11T04:26:25,081 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=59414c8d1d0a5ff3a2f59cbb4ca8825b, ASSIGN in 353 msec 2024-12-11T04:26:25,083 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T04:26:25,083 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733891185083"}]},"ts":"1733891185083"} 2024-12-11T04:26:25,086 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-11T04:26:25,091 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T04:26:25,094 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 1.2510 sec 2024-12-11T04:26:25,163 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:46111-0x1007f5379a30000, quorum=127.0.0.1:50078, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-11T04:26:25,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46111-0x1007f5379a30000, quorum=127.0.0.1:50078, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-11T04:26:25,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39071-0x1007f5379a30001, quorum=127.0.0.1:50078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T04:26:25,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46111-0x1007f5379a30000, quorum=127.0.0.1:50078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T04:26:25,196 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-11T04:26:25,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46111-0x1007f5379a30000, quorum=127.0.0.1:50078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-11T04:26:25,216 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 24 msec 2024-12-11T04:26:25,220 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-11T04:26:25,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46111-0x1007f5379a30000, quorum=127.0.0.1:50078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-11T04:26:25,235 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 14 msec 2024-12-11T04:26:25,247 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46111-0x1007f5379a30000, quorum=127.0.0.1:50078, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-11T04:26:25,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46111-0x1007f5379a30000, quorum=127.0.0.1:50078, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-11T04:26:25,249 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 4.902sec 2024-12-11T04:26:25,251 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-11T04:26:25,252 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-11T04:26:25,253 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-11T04:26:25,253 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-11T04:26:25,253 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-11T04:26:25,254 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5f466b3719ec,46111,1733891179518-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T04:26:25,255 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5f466b3719ec,46111,1733891179518-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-11T04:26:25,261 DEBUG [master/5f466b3719ec:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-11T04:26:25,262 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-11T04:26:25,262 INFO [master/5f466b3719ec:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=5f466b3719ec,46111,1733891179518-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-11T04:26:25,324 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x76523d14 to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@46873e4f 2024-12-11T04:26:25,325 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-12-11T04:26:25,331 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76ba07, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:26:25,335 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-11T04:26:25,335 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-11T04:26:25,343 DEBUG [hconnection-0x7edf53b1-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:26:25,351 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39080, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:26:25,360 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=5f466b3719ec,46111,1733891179518 2024-12-11T04:26:25,378 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=219, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=134, ProcessCount=11, AvailableMemoryMB=4255 2024-12-11T04:26:25,391 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-11T04:26:25,394 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51906, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-11T04:26:25,401 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
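Note: the WARN from client.ZKConnectionRegistry above flags the ZooKeeper-based client registry as deprecated. Purely as an illustration (this is not code from the test run), a client could opt into the RPC-based registry roughly as sketched below; the configuration keys and the bootstrap endpoint are assumptions based on the book section linked in the warning.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RegistryExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Assumed keys: select the RPC-based connection registry instead of the
        // deprecated ZKConnectionRegistry referenced in the WARN above.
        conf.set("hbase.client.registry.impl",
            "org.apache.hadoop.hbase.client.RpcConnectionRegistry");
        // The RPC registry needs bootstrap endpoints; host and port are placeholders.
        conf.set("hbase.client.bootstrap.servers", "master-host:16000");
        try (Connection connection = ConnectionFactory.createConnection(conf)) {
          System.out.println("connected: " + !connection.isClosed());
        }
      }
    }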
2024-12-11T04:26:25,404 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T04:26:25,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-11T04:26:25,409 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T04:26:25,409 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-12-11T04:26:25,409 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:25,411 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T04:26:25,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-11T04:26:25,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741837_1013 (size=963) 2024-12-11T04:26:25,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-11T04:26:25,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-11T04:26:25,831 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5 2024-12-11T04:26:25,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741838_1014 (size=53) 2024-12-11T04:26:25,840 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T04:26:25,841 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing c8c23c02526ae28f7a94d562fbd47bb4, disabling compactions & flushes 2024-12-11T04:26:25,841 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:25,841 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:25,841 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. after waiting 0 ms 2024-12-11T04:26:25,841 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:25,841 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:25,841 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:25,843 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T04:26:25,843 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733891185843"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733891185843"}]},"ts":"1733891185843"} 2024-12-11T04:26:25,846 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
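Note: the create-table request logged at 04:26:25,404 describes 'TestAcidGuarantees' with column families A, B and C, one version per cell, 64 KB blocks, and the table attribute hbase.hregion.compacting.memstore.type => 'ADAPTIVE'; the earlier TableDescriptorChecker WARN shows the descriptor also carries a very small memstore flush size (131072 bytes). A hypothetical client-side equivalent, shown only as a sketch of the Admin API and not the code the test actually runs:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder tdb =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                  // Table attribute recorded in the log: adaptive in-memory compaction.
                  .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE")
                  // A flush size this small (128 KB) is what triggers the
                  // TableDescriptorChecker WARN seen above.
                  .setMemStoreFlushSize(128 * 1024L);
          for (String family : new String[] { "A", "B", "C" }) {
            ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1)        // VERSIONS => '1'
                .setBlocksize(64 * 1024)  // BLOCKSIZE => '65536'
                .build();
            tdb.setColumnFamily(cfd);
          }
          admin.createTable(tdb.build());
        }
      }
    }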
2024-12-11T04:26:25,848 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T04:26:25,848 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733891185848"}]},"ts":"1733891185848"} 2024-12-11T04:26:25,850 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-11T04:26:25,855 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=c8c23c02526ae28f7a94d562fbd47bb4, ASSIGN}] 2024-12-11T04:26:25,857 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=c8c23c02526ae28f7a94d562fbd47bb4, ASSIGN 2024-12-11T04:26:25,858 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=c8c23c02526ae28f7a94d562fbd47bb4, ASSIGN; state=OFFLINE, location=5f466b3719ec,39071,1733891180267; forceNewPlan=false, retain=false 2024-12-11T04:26:26,009 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=c8c23c02526ae28f7a94d562fbd47bb4, regionState=OPENING, regionLocation=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:26,012 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267}] 2024-12-11T04:26:26,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-11T04:26:26,166 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:26,173 INFO [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 
2024-12-11T04:26:26,173 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} 2024-12-11T04:26:26,174 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees c8c23c02526ae28f7a94d562fbd47bb4 2024-12-11T04:26:26,174 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T04:26:26,174 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for c8c23c02526ae28f7a94d562fbd47bb4 2024-12-11T04:26:26,174 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for c8c23c02526ae28f7a94d562fbd47bb4 2024-12-11T04:26:26,176 INFO [StoreOpener-c8c23c02526ae28f7a94d562fbd47bb4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region c8c23c02526ae28f7a94d562fbd47bb4 2024-12-11T04:26:26,179 INFO [StoreOpener-c8c23c02526ae28f7a94d562fbd47bb4-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T04:26:26,179 INFO [StoreOpener-c8c23c02526ae28f7a94d562fbd47bb4-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c8c23c02526ae28f7a94d562fbd47bb4 columnFamilyName A 2024-12-11T04:26:26,180 DEBUG [StoreOpener-c8c23c02526ae28f7a94d562fbd47bb4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:26,181 INFO [StoreOpener-c8c23c02526ae28f7a94d562fbd47bb4-1 {}] regionserver.HStore(327): Store=c8c23c02526ae28f7a94d562fbd47bb4/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T04:26:26,181 INFO [StoreOpener-c8c23c02526ae28f7a94d562fbd47bb4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region c8c23c02526ae28f7a94d562fbd47bb4 2024-12-11T04:26:26,182 INFO [StoreOpener-c8c23c02526ae28f7a94d562fbd47bb4-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T04:26:26,183 INFO [StoreOpener-c8c23c02526ae28f7a94d562fbd47bb4-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c8c23c02526ae28f7a94d562fbd47bb4 columnFamilyName B 2024-12-11T04:26:26,183 DEBUG [StoreOpener-c8c23c02526ae28f7a94d562fbd47bb4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:26,184 INFO [StoreOpener-c8c23c02526ae28f7a94d562fbd47bb4-1 {}] regionserver.HStore(327): Store=c8c23c02526ae28f7a94d562fbd47bb4/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T04:26:26,184 INFO [StoreOpener-c8c23c02526ae28f7a94d562fbd47bb4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region c8c23c02526ae28f7a94d562fbd47bb4 2024-12-11T04:26:26,186 INFO [StoreOpener-c8c23c02526ae28f7a94d562fbd47bb4-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T04:26:26,186 INFO [StoreOpener-c8c23c02526ae28f7a94d562fbd47bb4-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c8c23c02526ae28f7a94d562fbd47bb4 columnFamilyName C 2024-12-11T04:26:26,186 DEBUG [StoreOpener-c8c23c02526ae28f7a94d562fbd47bb4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:26,187 INFO [StoreOpener-c8c23c02526ae28f7a94d562fbd47bb4-1 {}] regionserver.HStore(327): Store=c8c23c02526ae28f7a94d562fbd47bb4/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T04:26:26,188 INFO [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:26,189 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4 2024-12-11T04:26:26,190 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4 2024-12-11T04:26:26,193 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-11T04:26:26,195 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for c8c23c02526ae28f7a94d562fbd47bb4 2024-12-11T04:26:26,198 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T04:26:26,199 INFO [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened c8c23c02526ae28f7a94d562fbd47bb4; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59148833, jitterRate=-0.11861370503902435}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-11T04:26:26,200 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:26,201 INFO [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4., pid=11, masterSystemTime=1733891186166 2024-12-11T04:26:26,204 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:26,204 INFO [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 
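Note: the store openers above instantiate a CompactingMemStore with the ADAPTIVE compactor for families A, B and C, driven by the table-level hbase.hregion.compacting.memstore.type attribute. For illustration only, the same policy can also be requested per column family; a minimal sketch, assuming the standard ColumnFamilyDescriptorBuilder API:

    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AdaptiveFamilyExample {
      public static void main(String[] args) {
        // Per-family alternative to the table-level attribute seen in the log.
        ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("A"))
            .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
            .build();
        System.out.println(cfd);
      }
    }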
2024-12-11T04:26:26,205 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=c8c23c02526ae28f7a94d562fbd47bb4, regionState=OPEN, openSeqNum=2, regionLocation=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:26,211 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-11T04:26:26,212 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 in 196 msec 2024-12-11T04:26:26,215 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-11T04:26:26,215 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=c8c23c02526ae28f7a94d562fbd47bb4, ASSIGN in 357 msec 2024-12-11T04:26:26,216 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T04:26:26,216 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733891186216"}]},"ts":"1733891186216"} 2024-12-11T04:26:26,219 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-11T04:26:26,222 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T04:26:26,225 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 817 msec 2024-12-11T04:26:26,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-11T04:26:26,526 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-12-11T04:26:26,531 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6fcb5f29 to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7fdf5682 2024-12-11T04:26:26,536 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f6e36fe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:26:26,538 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:26:26,540 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39094, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:26:26,544 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-11T04:26:26,546 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51908, version=2.7.0-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-11T04:26:26,553 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1f2091cc to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@79d38d10 2024-12-11T04:26:26,556 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f343a4d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:26:26,557 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x09bd0964 to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6c63ae4e 2024-12-11T04:26:26,563 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1324ee83, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:26:26,564 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x18cb251d to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@736f1673 2024-12-11T04:26:26,568 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@478bae6b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:26:26,569 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x45b55c24 to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4ee2166f 2024-12-11T04:26:26,572 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@48068a5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:26:26,573 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0e52b42a to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3f34ff67 2024-12-11T04:26:26,576 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@38766d64, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:26:26,578 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x09ed28bb to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4b5cad1a 2024-12-11T04:26:26,581 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@295cb1ac, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:26:26,582 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x12a1285d to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3c3b736e 2024-12-11T04:26:26,585 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@70267494, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:26:26,586 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x353bc462 to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@767a8485 2024-12-11T04:26:26,590 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d2a8e08, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:26:26,591 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x47fe2fa7 to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6502d571 2024-12-11T04:26:26,594 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c915d17, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:26:26,599 DEBUG [hconnection-0x7da3ac9b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:26:26,600 DEBUG [hconnection-0x4d811003-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:26:26,601 DEBUG [hconnection-0x6ee6832c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:26:26,606 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39106, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:26:26,607 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39108, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:26:26,607 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:26:26,609 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39120, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:26:26,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees 2024-12-11T04:26:26,616 INFO 
[PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:26:26,618 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:26:26,620 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:26:26,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-11T04:26:26,632 DEBUG [hconnection-0x4ee426b5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:26:26,635 DEBUG [hconnection-0x79bb3058-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:26:26,637 DEBUG [hconnection-0x1734c206-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:26:26,640 DEBUG [hconnection-0x2522abb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:26:26,644 DEBUG [hconnection-0x7a7e860b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:26:26,652 DEBUG [hconnection-0x213b78eb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:26:26,673 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39124, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:26:26,680 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c8c23c02526ae28f7a94d562fbd47bb4 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-11T04:26:26,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on c8c23c02526ae28f7a94d562fbd47bb4 2024-12-11T04:26:26,691 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39126, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:26:26,697 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39146, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:26:26,698 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39136, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:26:26,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=A 2024-12-11T04:26:26,699 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:26,700 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39154, version=2.7.0-SNAPSHOT, sasl=false, 
ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:26:26,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=B 2024-12-11T04:26:26,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:26,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=C 2024-12-11T04:26:26,703 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:26,704 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39168, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:26:26,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-11T04:26:26,794 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:26,797 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-11T04:26:26,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:26,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. as already flushing 2024-12-11T04:26:26,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:26,806 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
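[Editor's note, not test output] The entries above show the master's FlushTableProcedure (pid=12) fanning out a FlushRegionProcedure (pid=13) whose region-server callable finds region c8c23c02526ae28f7a94d562fbd47bb4 already being flushed by MemStoreFlusher.0, so it fails with "Unable to complete flush" and the master re-dispatches it; that retry loop repeats through the rest of this excerpt. For context, here is a minimal sketch of the kind of client call that produces the "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" request logged by HMaster above. It is an assumption-laden illustration, not code from the test: the quorum, port, and session timeout are copied from the ReadOnlyZKClient line, and the rest of the test's configuration is not visible in this excerpt.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        // Assumed values, copied from the ReadOnlyZKClient(149) lines above:
        // quorum 127.0.0.1, client port 50078, session timeout 90000 ms.
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.setInt("hbase.zookeeper.property.clientPort", 50078);
        conf.setInt("zookeeper.session.timeout", 90000);

        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Submits a table flush on the master (pid=12 in the log); the master then
            // dispatches a per-region FlushRegionProcedure (pid=13) to the region server.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```

When the region server's own MemStoreFlusher already has a flush in flight for the region, the dispatched FlushRegionCallable reports the IOException seen above and the procedure framework keeps retrying until the in-progress flush completes.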
2024-12-11T04:26:26,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:26,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:26,823 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/0e6894392db447e6bcd16ce828576b3c is 50, key is test_row_0/A:col10/1733891186677/Put/seqid=0 2024-12-11T04:26:26,873 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:26,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891246860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:26,876 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:26,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891246865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:26,878 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:26,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891246870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:26,879 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:26,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891246872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:26,880 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:26,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891246874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:26,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741839_1015 (size=9657) 2024-12-11T04:26:26,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-11T04:26:26,974 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:26,975 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-11T04:26:26,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:26,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. as already flushing 2024-12-11T04:26:26,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:26,991 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:26,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:26,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:27,011 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:27,011 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:27,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891247006, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:27,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891247005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:27,013 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:27,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891247007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:27,014 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:27,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891247007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:27,017 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:27,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891247008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:27,147 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:27,148 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-11T04:26:27,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:27,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. as already flushing 2024-12-11T04:26:27,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:27,149 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
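[Editor's note, not test output] The repeated RegionTooBusyException warnings above ("Over memstore limit=512.0 K") are the region server rejecting writes in HRegion.checkResources while the memstore sits above its blocking threshold and the flush is still in flight. Below is a hedged sketch of how a writer might tolerate that condition with explicit backoff. It is illustrative only: the table, family, and qualifier names are taken from keys visible in the log, and the stock HBase client normally retries these exceptions internally anyway, depending on the retry configuration, which this excerpt only hints at.

```java
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
    /** Retries a Put with exponential backoff when the region reports it is too busy. */
    static void putWithBackoff(Connection connection, Put put) throws Exception {
        try (Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            long backoffMs = 100;
            for (int attempt = 0; attempt < 10; attempt++) {
                try {
                    table.put(put);
                    return;
                } catch (RegionTooBusyException e) {
                    // Memstore above the blocking limit (512.0 K in the log above);
                    // give the in-flight flush time to make progress before retrying.
                    Thread.sleep(backoffMs);
                    backoffMs = Math.min(backoffMs * 2, 5_000);
                }
            }
            throw new RuntimeException("region stayed too busy after retries");
        }
    }

    static Put exampleRow() {
        // Row/family/qualifier mirror the keys visible in the log (test_row_0, A:col10).
        return new Put(Bytes.toBytes("test_row_0"))
            .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
    }
}
```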
2024-12-11T04:26:27,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:27,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:27,218 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:27,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891247217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:27,220 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:27,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891247216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:27,221 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:27,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891247217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:27,221 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:27,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891247219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:27,222 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:27,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891247221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:27,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-11T04:26:27,304 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:27,305 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-11T04:26:27,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:27,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. as already flushing 2024-12-11T04:26:27,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:27,306 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
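[Editor's note, not test output] The 512.0 K figure in these warnings is the region's memstore blocking threshold, which HBase derives as the configured per-region flush size multiplied by hbase.hregion.memstore.block.multiplier; acid-guarantee style tests typically shrink the flush size so this code path is exercised quickly. The sketch below shows the two settings involved. The concrete numbers are assumptions for illustration (the test's actual values are not shown in this excerpt), chosen so that 128 KB x 4 reproduces the 512 K limit logged above.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed illustrative values: blocking limit = flush.size * block.multiplier.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // 128 KB flush trigger
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // block writes at 4x flush size
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("writes block above " + blockingLimit + " bytes"); // 524288 = 512.0 K
    }
}
```

Once the memstore grows past that product, checkResources throws the RegionTooBusyException seen in these entries until the flush drains it back below the limit.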
2024-12-11T04:26:27,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:27,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:27,310 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/0e6894392db447e6bcd16ce828576b3c 2024-12-11T04:26:27,430 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/88b7fb6ad8eb4522981555a9c89f8fb2 is 50, key is test_row_0/B:col10/1733891186677/Put/seqid=0 2024-12-11T04:26:27,459 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:27,462 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-11T04:26:27,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:27,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. as already flushing 2024-12-11T04:26:27,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:27,462 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
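On the client side, each rejected Mutate shows up in the CallRunner DEBUG lines above with its callId and deadline. The stock HBase client treats RegionTooBusyException as retriable and retries internally according to its pause/retry settings; a minimal sketch of a write with those knobs tuned explicitly (the table and row names are taken from this run, the values chosen here are illustrative):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RetryTunedPut {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // Knobs that govern how long the client keeps retrying a busy region
        // before the failure is surfaced to the caller.
        conf.setInt("hbase.client.retries.number", 15);
        conf.setLong("hbase.client.pause", 100);                // ms between attempts
        conf.setInt("hbase.client.operation.timeout", 120_000); // overall cap in ms

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v1"));
          // RegionTooBusyException is retried internally; only after the retry
          // budget is exhausted does an IOException reach this call site.
          table.put(put);
        }
      }
    }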
2024-12-11T04:26:27,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:27,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:27,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741840_1016 (size=9657) 2024-12-11T04:26:27,474 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/88b7fb6ad8eb4522981555a9c89f8fb2 2024-12-11T04:26:27,517 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/a5e752cdb82d41d59be5f043f543b27a is 50, key is test_row_0/C:col10/1733891186677/Put/seqid=0 2024-12-11T04:26:27,527 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:27,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891247526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:27,529 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:27,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891247527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:27,529 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:27,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891247526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:27,531 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:27,531 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:27,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891247527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:27,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891247527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:27,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741841_1017 (size=9657) 2024-12-11T04:26:27,563 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/a5e752cdb82d41d59be5f043f543b27a 2024-12-11T04:26:27,577 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/0e6894392db447e6bcd16ce828576b3c as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/0e6894392db447e6bcd16ce828576b3c 2024-12-11T04:26:27,591 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/0e6894392db447e6bcd16ce828576b3c, entries=100, sequenceid=13, filesize=9.4 K 2024-12-11T04:26:27,598 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/88b7fb6ad8eb4522981555a9c89f8fb2 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/88b7fb6ad8eb4522981555a9c89f8fb2 2024-12-11T04:26:27,612 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/88b7fb6ad8eb4522981555a9c89f8fb2, entries=100, sequenceid=13, filesize=9.4 K 2024-12-11T04:26:27,616 DEBUG 
[RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:27,617 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-11T04:26:27,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:27,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. as already flushing 2024-12-11T04:26:27,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:27,618 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:27,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
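pid=13 is a master-driven flush: the master keeps dispatching FlushRegionCallable to the region server, and as long as the region reports "already flushing" each attempt fails with "Unable to complete flush", so the master simply re-dispatches until a fresh flush can start. A flush like this is typically requested through the Admin API; a minimal sketch, assuming the same table name as in this run:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestFlush {
      public static void main(String[] args) throws IOException {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Asks the master to flush every region of the table; the master drives
          // the per-region procedures and retries them, as seen in the log above.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }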
2024-12-11T04:26:27,618 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/a5e752cdb82d41d59be5f043f543b27a as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/a5e752cdb82d41d59be5f043f543b27a 2024-12-11T04:26:27,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
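For context, TestAcidGuarantees exercises row-level atomicity: writers put one value across columns in families A, B and C of the same row, and readers must never observe a row whose cells disagree. A hedged sketch of that kind of read-side check follows; the class name and the "all cells carry one value" layout are illustrative assumptions, not the test's actual code.

    import java.io.IOException;
    import java.util.Arrays;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellUtil;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RowAtomicityCheck {
      public static void main(String[] args) throws IOException {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Result r = table.get(new Get(Bytes.toBytes("test_row_0")));
          byte[] first = null;
          for (Cell cell : r.rawCells()) {
            byte[] value = CellUtil.cloneValue(cell);
            if (first == null) {
              first = value;
            } else if (!Arrays.equals(first, value)) {
              // Mixed values within one row would mean a reader observed a
              // partially applied multi-family update.
              throw new IllegalStateException("row not atomic");
            }
          }
        }
      }
    }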
2024-12-11T04:26:27,642 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/a5e752cdb82d41d59be5f043f543b27a, entries=100, sequenceid=13, filesize=9.4 K 2024-12-11T04:26:27,644 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for c8c23c02526ae28f7a94d562fbd47bb4 in 964ms, sequenceid=13, compaction requested=false 2024-12-11T04:26:27,646 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-11T04:26:27,648 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:27,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-11T04:26:27,778 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:27,779 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-11T04:26:27,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:27,780 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing c8c23c02526ae28f7a94d562fbd47bb4 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-11T04:26:27,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=A 2024-12-11T04:26:27,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:27,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=B 2024-12-11T04:26:27,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:27,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=C 2024-12-11T04:26:27,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:27,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/2a84c15c354c47f480d3dbe67c4b3cc5 is 50, key is test_row_0/A:col10/1733891186859/Put/seqid=0 2024-12-11T04:26:27,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741842_1018 (size=12001) 2024-12-11T04:26:28,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on c8c23c02526ae28f7a94d562fbd47bb4 2024-12-11T04:26:28,052 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. as already flushing 2024-12-11T04:26:28,079 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:28,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891248071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:28,081 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:28,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891248071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:28,084 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:28,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891248080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:28,085 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:28,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891248080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:28,086 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:28,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891248082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:28,186 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:28,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891248185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:28,188 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:28,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891248185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:28,189 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:28,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891248187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:28,191 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:28,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891248188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:28,191 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:28,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891248189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:28,228 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/2a84c15c354c47f480d3dbe67c4b3cc5 2024-12-11T04:26:28,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/d9a4aba5b75f4c7eb337238f37ee06cc is 50, key is test_row_0/B:col10/1733891186859/Put/seqid=0 2024-12-11T04:26:28,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741843_1019 (size=12001) 2024-12-11T04:26:28,284 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/d9a4aba5b75f4c7eb337238f37ee06cc 2024-12-11T04:26:28,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/23bb06ca4ce944319eed3cb6b1eb57ab is 50, key is test_row_0/C:col10/1733891186859/Put/seqid=0 2024-12-11T04:26:28,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741844_1020 (size=12001) 2024-12-11T04:26:28,395 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:28,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891248392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:28,397 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:28,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891248393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:28,398 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:28,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891248393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:28,399 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:28,399 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:28,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891248394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:28,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891248394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:28,435 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-11T04:26:28,437 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-11T04:26:28,701 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:28,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891248700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:28,703 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:28,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891248702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:28,705 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:28,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891248703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:28,706 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:28,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891248703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:28,707 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:28,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891248704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:28,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-11T04:26:28,756 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/23bb06ca4ce944319eed3cb6b1eb57ab 2024-12-11T04:26:28,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/2a84c15c354c47f480d3dbe67c4b3cc5 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/2a84c15c354c47f480d3dbe67c4b3cc5 2024-12-11T04:26:28,780 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/2a84c15c354c47f480d3dbe67c4b3cc5, entries=150, sequenceid=37, filesize=11.7 K 2024-12-11T04:26:28,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/d9a4aba5b75f4c7eb337238f37ee06cc as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/d9a4aba5b75f4c7eb337238f37ee06cc 2024-12-11T04:26:28,799 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/d9a4aba5b75f4c7eb337238f37ee06cc, entries=150, sequenceid=37, filesize=11.7 K 2024-12-11T04:26:28,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/23bb06ca4ce944319eed3cb6b1eb57ab as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/23bb06ca4ce944319eed3cb6b1eb57ab 2024-12-11T04:26:28,820 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/23bb06ca4ce944319eed3cb6b1eb57ab, entries=150, sequenceid=37, filesize=11.7 K 2024-12-11T04:26:28,836 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=73.80 KB/75570 for c8c23c02526ae28f7a94d562fbd47bb4 in 1056ms, sequenceid=37, compaction requested=false 2024-12-11T04:26:28,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:28,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 
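(Aside: the rejected Mutate calls above are what a client sees while the region's memstore sits over its 512.0 K blocking limit; they clear once the flush that finishes at 04:26:28,836 drains the memstore. Below is a minimal, illustrative Java sketch of a writer that backs off and retries on RegionTooBusyException. The table name, row key and column family mirror this log, but the retry count and backoff values are assumptions, not taken from the test; note the stock HBase client also retries this exception internally, the sketch only makes the pattern explicit.)

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryOnBusyRegion {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            // Row key, family and qualifier mirror the cells seen in this log (test_row_0/A:col10).
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            long backoffMs = 100L; // illustrative starting backoff, not from the test
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put); // rejected with RegionTooBusyException while the memstore is over its blocking limit
                    break;
                } catch (RegionTooBusyException busy) {
                    // The server refuses writes until the pending flush drains the memstore;
                    // back off instead of keeping the RPC handler threads busy.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}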
2024-12-11T04:26:28,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-12-11T04:26:28,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-12-11T04:26:28,842 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-12-11T04:26:28,842 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2200 sec 2024-12-11T04:26:28,845 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees in 2.2340 sec 2024-12-11T04:26:29,201 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-11T04:26:29,215 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c8c23c02526ae28f7a94d562fbd47bb4 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-11T04:26:29,215 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=A 2024-12-11T04:26:29,215 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:29,216 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=B 2024-12-11T04:26:29,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on c8c23c02526ae28f7a94d562fbd47bb4 2024-12-11T04:26:29,216 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:29,216 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=C 2024-12-11T04:26:29,216 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:29,226 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/88bbb5debb364f7b9d47f3def2fd0b6d is 50, key is test_row_0/A:col10/1733891189212/Put/seqid=0 2024-12-11T04:26:29,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741845_1021 (size=12001) 2024-12-11T04:26:29,273 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/88bbb5debb364f7b9d47f3def2fd0b6d 2024-12-11T04:26:29,279 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:29,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891249265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:29,280 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:29,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891249275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:29,281 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:29,281 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:29,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891249276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:29,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891249271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:29,282 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:29,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891249277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:29,319 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/14b170e24c1148b09d3c03d0b462c974 is 50, key is test_row_0/B:col10/1733891189212/Put/seqid=0 2024-12-11T04:26:29,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741846_1022 (size=12001) 2024-12-11T04:26:29,352 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/14b170e24c1148b09d3c03d0b462c974 2024-12-11T04:26:29,379 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/64657ca9ff154d0aa4e46ba87875f2b9 is 50, key is test_row_0/C:col10/1733891189212/Put/seqid=0 2024-12-11T04:26:29,386 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:29,387 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:29,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891249382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:29,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891249383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:29,387 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:29,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891249384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:29,388 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:29,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891249387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:29,389 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:29,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891249387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:29,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741847_1023 (size=12001) 2024-12-11T04:26:29,422 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/64657ca9ff154d0aa4e46ba87875f2b9 2024-12-11T04:26:29,442 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/88bbb5debb364f7b9d47f3def2fd0b6d as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/88bbb5debb364f7b9d47f3def2fd0b6d 2024-12-11T04:26:29,459 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/88bbb5debb364f7b9d47f3def2fd0b6d, entries=150, sequenceid=52, filesize=11.7 K 2024-12-11T04:26:29,463 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/14b170e24c1148b09d3c03d0b462c974 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/14b170e24c1148b09d3c03d0b462c974 2024-12-11T04:26:29,474 
INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/14b170e24c1148b09d3c03d0b462c974, entries=150, sequenceid=52, filesize=11.7 K 2024-12-11T04:26:29,476 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/64657ca9ff154d0aa4e46ba87875f2b9 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/64657ca9ff154d0aa4e46ba87875f2b9 2024-12-11T04:26:29,489 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/64657ca9ff154d0aa4e46ba87875f2b9, entries=150, sequenceid=52, filesize=11.7 K 2024-12-11T04:26:29,491 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for c8c23c02526ae28f7a94d562fbd47bb4 in 275ms, sequenceid=52, compaction requested=true 2024-12-11T04:26:29,491 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:29,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c8c23c02526ae28f7a94d562fbd47bb4:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:26:29,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:29,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c8c23c02526ae28f7a94d562fbd47bb4:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:26:29,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:29,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c8c23c02526ae28f7a94d562fbd47bb4:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:26:29,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:26:29,501 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:26:29,506 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:26:29,507 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): c8c23c02526ae28f7a94d562fbd47bb4/B is initiating minor compaction (all files) 2024-12-11T04:26:29,508 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): 
Starting compaction of c8c23c02526ae28f7a94d562fbd47bb4/B in TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:29,508 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/88b7fb6ad8eb4522981555a9c89f8fb2, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/d9a4aba5b75f4c7eb337238f37ee06cc, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/14b170e24c1148b09d3c03d0b462c974] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp, totalSize=32.9 K 2024-12-11T04:26:29,509 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 88b7fb6ad8eb4522981555a9c89f8fb2, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733891186647 2024-12-11T04:26:29,510 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting d9a4aba5b75f4c7eb337238f37ee06cc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733891186859 2024-12-11T04:26:29,511 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:26:29,511 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 14b170e24c1148b09d3c03d0b462c974, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733891188056 2024-12-11T04:26:29,516 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:26:29,517 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): c8c23c02526ae28f7a94d562fbd47bb4/A is initiating minor compaction (all files) 2024-12-11T04:26:29,517 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c8c23c02526ae28f7a94d562fbd47bb4/A in TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 
2024-12-11T04:26:29,517 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/0e6894392db447e6bcd16ce828576b3c, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/2a84c15c354c47f480d3dbe67c4b3cc5, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/88bbb5debb364f7b9d47f3def2fd0b6d] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp, totalSize=32.9 K 2024-12-11T04:26:29,518 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0e6894392db447e6bcd16ce828576b3c, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733891186647 2024-12-11T04:26:29,519 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2a84c15c354c47f480d3dbe67c4b3cc5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733891186859 2024-12-11T04:26:29,520 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 88bbb5debb364f7b9d47f3def2fd0b6d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733891188056 2024-12-11T04:26:29,557 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c8c23c02526ae28f7a94d562fbd47bb4#B#compaction#9 average throughput is 0.36 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:26:29,558 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/1715d09c8f6e4ec684e9d3a95b9539e1 is 50, key is test_row_0/B:col10/1733891189212/Put/seqid=0 2024-12-11T04:26:29,576 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c8c23c02526ae28f7a94d562fbd47bb4#A#compaction#10 average throughput is 0.60 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:26:29,577 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/5a4b4f64059c478ba193dbd554649405 is 50, key is test_row_0/A:col10/1733891189212/Put/seqid=0 2024-12-11T04:26:29,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741848_1024 (size=12104) 2024-12-11T04:26:29,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on c8c23c02526ae28f7a94d562fbd47bb4 2024-12-11T04:26:29,614 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c8c23c02526ae28f7a94d562fbd47bb4 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-11T04:26:29,614 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=A 2024-12-11T04:26:29,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741849_1025 (size=12104) 2024-12-11T04:26:29,615 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:29,615 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=B 2024-12-11T04:26:29,615 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:29,615 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=C 2024-12-11T04:26:29,615 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:29,637 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/1715d09c8f6e4ec684e9d3a95b9539e1 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/1715d09c8f6e4ec684e9d3a95b9539e1 2024-12-11T04:26:29,637 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/5a4b4f64059c478ba193dbd554649405 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/5a4b4f64059c478ba193dbd554649405 2024-12-11T04:26:29,652 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/9c183b20da19439aa33729c6f605e9eb is 50, key is test_row_0/A:col10/1733891189270/Put/seqid=0 2024-12-11T04:26:29,663 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 
c8c23c02526ae28f7a94d562fbd47bb4/A of c8c23c02526ae28f7a94d562fbd47bb4 into 5a4b4f64059c478ba193dbd554649405(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:26:29,663 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:29,663 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4., storeName=c8c23c02526ae28f7a94d562fbd47bb4/A, priority=13, startTime=1733891189493; duration=0sec 2024-12-11T04:26:29,664 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:26:29,664 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c8c23c02526ae28f7a94d562fbd47bb4:A 2024-12-11T04:26:29,664 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:26:29,665 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c8c23c02526ae28f7a94d562fbd47bb4/B of c8c23c02526ae28f7a94d562fbd47bb4 into 1715d09c8f6e4ec684e9d3a95b9539e1(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:26:29,665 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:29,665 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4., storeName=c8c23c02526ae28f7a94d562fbd47bb4/B, priority=13, startTime=1733891189500; duration=0sec 2024-12-11T04:26:29,666 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:29,666 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c8c23c02526ae28f7a94d562fbd47bb4:B 2024-12-11T04:26:29,667 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:26:29,667 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): c8c23c02526ae28f7a94d562fbd47bb4/C is initiating minor compaction (all files) 2024-12-11T04:26:29,667 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c8c23c02526ae28f7a94d562fbd47bb4/C in TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 
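(Aside: the 512.0 K figure in the RegionTooBusyException messages throughout this log is the region's memstore blocking limit, which HRegion.checkResources derives from the configured memstore flush size multiplied by the block multiplier. The Java sketch below shows that derivation; the configuration key names are the standard HBase ones, but the concrete values are only a guess at how a test could arrive at 512 K, not the settings actually used by this run.)

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Standard HBase keys; the small values are illustrative, chosen only so that
        // flushSize * multiplier works out to the 512.0 K limit reported in this log.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = flushSize * multiplier; // 128 KB * 4 = 512 KB
        // Writes to a region are rejected with RegionTooBusyException ("Over memstore limit=...")
        // while its memstore data size stays above this value and the flush has not yet completed.
        System.out.println("memstore blocking limit = " + blockingLimit + " bytes");
    }
}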
2024-12-11T04:26:29,668 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/a5e752cdb82d41d59be5f043f543b27a, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/23bb06ca4ce944319eed3cb6b1eb57ab, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/64657ca9ff154d0aa4e46ba87875f2b9] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp, totalSize=32.9 K 2024-12-11T04:26:29,668 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting a5e752cdb82d41d59be5f043f543b27a, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733891186647 2024-12-11T04:26:29,669 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 23bb06ca4ce944319eed3cb6b1eb57ab, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733891186859 2024-12-11T04:26:29,670 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 64657ca9ff154d0aa4e46ba87875f2b9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733891188056 2024-12-11T04:26:29,685 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:29,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891249658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:29,687 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:29,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891249661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:29,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741850_1026 (size=12001) 2024-12-11T04:26:29,689 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:29,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891249676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:29,689 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/9c183b20da19439aa33729c6f605e9eb 2024-12-11T04:26:29,698 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:29,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891249685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:29,699 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:29,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891249689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:29,715 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c8c23c02526ae28f7a94d562fbd47bb4#C#compaction#12 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:26:29,716 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/6d94b2bf9d684549b09cf7ea261bbae1 is 50, key is test_row_0/C:col10/1733891189212/Put/seqid=0 2024-12-11T04:26:29,724 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/7d8c05e6ebdd48bbb0e62d690f655920 is 50, key is test_row_0/B:col10/1733891189270/Put/seqid=0 2024-12-11T04:26:29,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741851_1027 (size=12104) 2024-12-11T04:26:29,762 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/6d94b2bf9d684549b09cf7ea261bbae1 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/6d94b2bf9d684549b09cf7ea261bbae1 2024-12-11T04:26:29,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741852_1028 (size=12001) 2024-12-11T04:26:29,768 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/7d8c05e6ebdd48bbb0e62d690f655920 2024-12-11T04:26:29,779 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c8c23c02526ae28f7a94d562fbd47bb4/C of c8c23c02526ae28f7a94d562fbd47bb4 into 6d94b2bf9d684549b09cf7ea261bbae1(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:26:29,779 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:29,779 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4., storeName=c8c23c02526ae28f7a94d562fbd47bb4/C, priority=13, startTime=1733891189500; duration=0sec 2024-12-11T04:26:29,779 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:29,779 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c8c23c02526ae28f7a94d562fbd47bb4:C 2024-12-11T04:26:29,789 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:29,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891249788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:29,795 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:29,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891249789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:29,796 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:29,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891249791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:29,796 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/9f1dc652c61b429d951260284ec02979 is 50, key is test_row_0/C:col10/1733891189270/Put/seqid=0 2024-12-11T04:26:29,803 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:29,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891249800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:29,804 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:29,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891249801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:29,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741853_1029 (size=12001) 2024-12-11T04:26:29,836 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/9f1dc652c61b429d951260284ec02979 2024-12-11T04:26:29,867 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/9c183b20da19439aa33729c6f605e9eb as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/9c183b20da19439aa33729c6f605e9eb 2024-12-11T04:26:29,886 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/9c183b20da19439aa33729c6f605e9eb, entries=150, sequenceid=74, filesize=11.7 K 2024-12-11T04:26:29,891 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/7d8c05e6ebdd48bbb0e62d690f655920 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/7d8c05e6ebdd48bbb0e62d690f655920 2024-12-11T04:26:29,908 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/7d8c05e6ebdd48bbb0e62d690f655920, entries=150, sequenceid=74, filesize=11.7 K 2024-12-11T04:26:29,912 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/9f1dc652c61b429d951260284ec02979 as 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/9f1dc652c61b429d951260284ec02979 2024-12-11T04:26:29,931 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/9f1dc652c61b429d951260284ec02979, entries=150, sequenceid=74, filesize=11.7 K 2024-12-11T04:26:29,933 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for c8c23c02526ae28f7a94d562fbd47bb4 in 319ms, sequenceid=74, compaction requested=false 2024-12-11T04:26:29,934 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:29,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on c8c23c02526ae28f7a94d562fbd47bb4 2024-12-11T04:26:29,996 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c8c23c02526ae28f7a94d562fbd47bb4 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-11T04:26:29,997 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=A 2024-12-11T04:26:29,997 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:29,997 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=B 2024-12-11T04:26:29,997 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:29,997 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=C 2024-12-11T04:26:29,997 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:30,012 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-11T04:26:30,013 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-11T04:26:30,015 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-11T04:26:30,015 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-12-11T04:26:30,017 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-11T04:26:30,017 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-11T04:26:30,018 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): 
Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-11T04:26:30,018 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-11T04:26:30,019 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-11T04:26:30,019 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-12-11T04:26:30,022 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/5fc1999774654217bb649da43963aea4 is 50, key is test_row_0/A:col10/1733891189672/Put/seqid=0 2024-12-11T04:26:30,044 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:30,046 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:30,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891250038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:30,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891250038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:30,049 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:30,049 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:30,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891250040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:30,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891250041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:30,051 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:30,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891250042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:30,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741854_1030 (size=14341) 2024-12-11T04:26:30,052 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/5fc1999774654217bb649da43963aea4 2024-12-11T04:26:30,075 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/88778e6523c641528a0a7d9393a390bd is 50, key is test_row_0/B:col10/1733891189672/Put/seqid=0 2024-12-11T04:26:30,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741855_1031 (size=12001) 2024-12-11T04:26:30,131 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/88778e6523c641528a0a7d9393a390bd 2024-12-11T04:26:30,153 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:30,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891250151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:30,154 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:30,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891250151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:30,155 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:30,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891250152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:30,155 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:30,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891250152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:30,156 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:30,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891250153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:30,163 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/9f85eb2218094c0e86cd9e13a3b381c6 is 50, key is test_row_0/C:col10/1733891189672/Put/seqid=0 2024-12-11T04:26:30,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741856_1032 (size=12001) 2024-12-11T04:26:30,207 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/9f85eb2218094c0e86cd9e13a3b381c6 2024-12-11T04:26:30,227 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/5fc1999774654217bb649da43963aea4 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/5fc1999774654217bb649da43963aea4 2024-12-11T04:26:30,248 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/5fc1999774654217bb649da43963aea4, entries=200, sequenceid=93, filesize=14.0 K 2024-12-11T04:26:30,250 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/88778e6523c641528a0a7d9393a390bd as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/88778e6523c641528a0a7d9393a390bd 2024-12-11T04:26:30,276 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/88778e6523c641528a0a7d9393a390bd, entries=150, sequenceid=93, filesize=11.7 K 2024-12-11T04:26:30,278 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/9f85eb2218094c0e86cd9e13a3b381c6 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/9f85eb2218094c0e86cd9e13a3b381c6 2024-12-11T04:26:30,288 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/9f85eb2218094c0e86cd9e13a3b381c6, entries=150, sequenceid=93, filesize=11.7 K 2024-12-11T04:26:30,291 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for c8c23c02526ae28f7a94d562fbd47bb4 in 295ms, sequenceid=93, compaction requested=true 2024-12-11T04:26:30,291 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:30,291 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c8c23c02526ae28f7a94d562fbd47bb4:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:26:30,291 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:30,291 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:26:30,292 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:26:30,294 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:26:30,294 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38446 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:26:30,294 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): c8c23c02526ae28f7a94d562fbd47bb4/B is initiating minor compaction (all files) 2024-12-11T04:26:30,295 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): 
c8c23c02526ae28f7a94d562fbd47bb4/A is initiating minor compaction (all files) 2024-12-11T04:26:30,295 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c8c23c02526ae28f7a94d562fbd47bb4/B in TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:30,295 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c8c23c02526ae28f7a94d562fbd47bb4/A in TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:30,295 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/1715d09c8f6e4ec684e9d3a95b9539e1, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/7d8c05e6ebdd48bbb0e62d690f655920, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/88778e6523c641528a0a7d9393a390bd] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp, totalSize=35.3 K 2024-12-11T04:26:30,295 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/5a4b4f64059c478ba193dbd554649405, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/9c183b20da19439aa33729c6f605e9eb, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/5fc1999774654217bb649da43963aea4] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp, totalSize=37.5 K 2024-12-11T04:26:30,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c8c23c02526ae28f7a94d562fbd47bb4:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:26:30,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:30,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c8c23c02526ae28f7a94d562fbd47bb4:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:26:30,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:26:30,296 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5a4b4f64059c478ba193dbd554649405, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733891188056 2024-12-11T04:26:30,296 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 1715d09c8f6e4ec684e9d3a95b9539e1, 
keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733891188056 2024-12-11T04:26:30,297 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9c183b20da19439aa33729c6f605e9eb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1733891189257 2024-12-11T04:26:30,298 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 7d8c05e6ebdd48bbb0e62d690f655920, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1733891189257 2024-12-11T04:26:30,298 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5fc1999774654217bb649da43963aea4, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1733891189672 2024-12-11T04:26:30,299 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 88778e6523c641528a0a7d9393a390bd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1733891189672 2024-12-11T04:26:30,323 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c8c23c02526ae28f7a94d562fbd47bb4#B#compaction#18 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:26:30,325 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/fa1c6b5eea8e45d4ba5534c373d232ca is 50, key is test_row_0/B:col10/1733891189672/Put/seqid=0 2024-12-11T04:26:30,335 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c8c23c02526ae28f7a94d562fbd47bb4#A#compaction#19 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:26:30,336 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/326763d2c70e4256abf5ecbbcec31a21 is 50, key is test_row_0/A:col10/1733891189672/Put/seqid=0 2024-12-11T04:26:30,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on c8c23c02526ae28f7a94d562fbd47bb4 2024-12-11T04:26:30,362 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c8c23c02526ae28f7a94d562fbd47bb4 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-11T04:26:30,363 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=A 2024-12-11T04:26:30,364 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:30,364 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=B 2024-12-11T04:26:30,364 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:30,364 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=C 2024-12-11T04:26:30,364 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:30,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741857_1033 (size=12207) 2024-12-11T04:26:30,383 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:30,384 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:30,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891250375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:30,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891250377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:30,387 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:30,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891250381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:30,388 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:30,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891250382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:30,388 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:30,389 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/0cf6791a0f3342a8b42c9ec46bb31fd5 is 50, key is test_row_0/A:col10/1733891190019/Put/seqid=0 2024-12-11T04:26:30,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891250384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:30,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741858_1034 (size=12207) 2024-12-11T04:26:30,407 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/326763d2c70e4256abf5ecbbcec31a21 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/326763d2c70e4256abf5ecbbcec31a21 2024-12-11T04:26:30,421 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c8c23c02526ae28f7a94d562fbd47bb4/A of c8c23c02526ae28f7a94d562fbd47bb4 into 326763d2c70e4256abf5ecbbcec31a21(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
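Editor's note: the repeated RegionTooBusyException "Over memstore limit=512.0 K" warnings above are the region server pushing back on the test's writers while flushes and compactions catch up. A minimal sketch of the client write path those Mutate RPCs correspond to is below; the row, family, and qualifier names mirror the keys visible in this log (test_row_0, family A, qualifier col10), the backoff values are assumptions, and in practice the HBase client's own retry settings (hbase.client.retries.number, hbase.client.pause) normally retry this exception internally, so application code may only see it once retries are exhausted.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100; // hypothetical starting backoff
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);
          return; // write accepted
        } catch (RegionTooBusyException e) {
          // Region is over its blocking memstore size; give flushes a chance to drain it.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
      throw new IllegalStateException("region stayed too busy after retries");
    }
  }
}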
2024-12-11T04:26:30,421 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:30,421 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4., storeName=c8c23c02526ae28f7a94d562fbd47bb4/A, priority=13, startTime=1733891190291; duration=0sec 2024-12-11T04:26:30,421 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:26:30,421 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c8c23c02526ae28f7a94d562fbd47bb4:A 2024-12-11T04:26:30,422 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:26:30,424 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:26:30,424 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): c8c23c02526ae28f7a94d562fbd47bb4/C is initiating minor compaction (all files) 2024-12-11T04:26:30,424 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c8c23c02526ae28f7a94d562fbd47bb4/C in TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:30,426 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/6d94b2bf9d684549b09cf7ea261bbae1, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/9f1dc652c61b429d951260284ec02979, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/9f85eb2218094c0e86cd9e13a3b381c6] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp, totalSize=35.3 K 2024-12-11T04:26:30,427 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6d94b2bf9d684549b09cf7ea261bbae1, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733891188056 2024-12-11T04:26:30,427 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9f1dc652c61b429d951260284ec02979, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1733891189257 2024-12-11T04:26:30,428 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9f85eb2218094c0e86cd9e13a3b381c6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1733891189672 2024-12-11T04:26:30,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:45413 is added to blk_1073741859_1035 (size=12001) 2024-12-11T04:26:30,444 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/0cf6791a0f3342a8b42c9ec46bb31fd5 2024-12-11T04:26:30,463 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c8c23c02526ae28f7a94d562fbd47bb4#C#compaction#21 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:26:30,464 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/689c5752e9a8487dbc9e824cb82034f6 is 50, key is test_row_0/C:col10/1733891189672/Put/seqid=0 2024-12-11T04:26:30,472 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/159e6bcc07be443d816600856ad6b75e is 50, key is test_row_0/B:col10/1733891190019/Put/seqid=0 2024-12-11T04:26:30,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741860_1036 (size=12207) 2024-12-11T04:26:30,487 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:30,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891250486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:30,488 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:30,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891250487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:30,491 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:30,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891250489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:30,492 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:30,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891250491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:30,499 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:30,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891250497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:30,503 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/689c5752e9a8487dbc9e824cb82034f6 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/689c5752e9a8487dbc9e824cb82034f6 2024-12-11T04:26:30,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741861_1037 (size=12001) 2024-12-11T04:26:30,521 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c8c23c02526ae28f7a94d562fbd47bb4/C of c8c23c02526ae28f7a94d562fbd47bb4 into 689c5752e9a8487dbc9e824cb82034f6(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
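Editor's note: the limits reported in these records map onto a handful of standard HBase tuning knobs: writes are rejected at "Over memstore limit=512.0 K" (the memstore flush size times the block multiplier), a minor compaction is requested as soon as a store has 3 eligible files, and the compaction policy reports "16 blocking" store files. The sketch below shows a configuration that would produce exactly those limits; the concrete values are inferred from the log, not taken from the test's actual setup.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstorePressureConfigSketch {
  // Returns a Configuration whose limits match what this log reports (assumed values).
  public static Configuration smallMemstoreConf() {
    Configuration conf = HBaseConfiguration.create();
    // Flush a region's memstore once it reaches 64 KB (assumed).
    conf.setLong("hbase.hregion.memstore.flush.size", 64 * 1024L);
    // Reject writes with RegionTooBusyException once the memstore reaches
    // flush.size * multiplier = 512 KB, the "Over memstore limit" seen above.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 8);
    // Request a minor compaction once a store has 3 HFiles ("3 eligible").
    conf.setInt("hbase.hstore.compactionThreshold", 3);
    // Block further updates when a store accumulates 16 files ("16 blocking").
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    return conf;
  }
}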
2024-12-11T04:26:30,521 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:30,522 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4., storeName=c8c23c02526ae28f7a94d562fbd47bb4/C, priority=13, startTime=1733891190296; duration=0sec 2024-12-11T04:26:30,522 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:30,522 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c8c23c02526ae28f7a94d562fbd47bb4:C 2024-12-11T04:26:30,719 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:30,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891250718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:30,723 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:30,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891250718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:30,724 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:30,724 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:30,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891250718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:30,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891250718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:30,731 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:30,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891250719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:30,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-11T04:26:30,739 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed 2024-12-11T04:26:30,743 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:26:30,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees 2024-12-11T04:26:30,747 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:26:30,748 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:26:30,749 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:26:30,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-11T04:26:30,787 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/fa1c6b5eea8e45d4ba5534c373d232ca as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/fa1c6b5eea8e45d4ba5534c373d232ca 2024-12-11T04:26:30,800 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c8c23c02526ae28f7a94d562fbd47bb4/B of c8c23c02526ae28f7a94d562fbd47bb4 into fa1c6b5eea8e45d4ba5534c373d232ca(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
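Editor's note: the "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" record and the FlushTableProcedure/FlushRegionProcedure entries that follow are the server side of an administrative flush request. Its client-side counterpart is roughly the sketch below; the connection setup is assumed boilerplate, and only the table name is taken from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AdminFlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // In this log the request becomes a FlushTableProcedure on the master (pid=14),
      // which dispatches a FlushRegionProcedure subprocedure (pid=15) to the region server.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}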
2024-12-11T04:26:30,800 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:30,800 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4., storeName=c8c23c02526ae28f7a94d562fbd47bb4/B, priority=13, startTime=1733891190291; duration=0sec 2024-12-11T04:26:30,802 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:30,802 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c8c23c02526ae28f7a94d562fbd47bb4:B 2024-12-11T04:26:30,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-11T04:26:30,902 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:30,903 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-11T04:26:30,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:30,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. as already flushing 2024-12-11T04:26:30,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:30,904 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
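Editor's note: the IOException "Unable to complete flush" above is the flush subprocedure declining to run because the region reports "NOT flushing ... as already flushing"; the following records show that failure being reported to the master, which re-dispatches pid=15 until the in-flight flush completes. As a loose illustration of that retry-on-transient-failure pattern (this is a generic sketch, not HBase's internal procedure code):

import java.io.IOException;
import java.util.concurrent.Callable;

// Generic sketch: retry an operation that can fail transiently, the way the
// master keeps re-dispatching the flush subprocedure seen in this log.
public final class TransientRetrySketch {
  public static <T> T callWithRetry(Callable<T> op, int maxAttempts, long pauseMs)
      throws Exception {
    Exception last = null;
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
      try {
        return op.call();
      } catch (IOException e) { // e.g. "Unable to complete flush ... already flushing"
        last = e;
        Thread.sleep(pauseMs * attempt); // simple linear backoff; values are arbitrary
      }
    }
    if (last == null) {
      throw new IllegalArgumentException("maxAttempts must be >= 1");
    }
    throw last;
  }
}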
2024-12-11T04:26:30,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:30,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:30,921 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/159e6bcc07be443d816600856ad6b75e 2024-12-11T04:26:30,946 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/ffba4a8a19314d82853e7744cdf48037 is 50, key is test_row_0/C:col10/1733891190019/Put/seqid=0 2024-12-11T04:26:30,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741862_1038 (size=12001) 2024-12-11T04:26:30,974 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/ffba4a8a19314d82853e7744cdf48037 2024-12-11T04:26:30,984 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/0cf6791a0f3342a8b42c9ec46bb31fd5 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/0cf6791a0f3342a8b42c9ec46bb31fd5 2024-12-11T04:26:30,994 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/0cf6791a0f3342a8b42c9ec46bb31fd5, entries=150, sequenceid=116, filesize=11.7 K 2024-12-11T04:26:30,996 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/159e6bcc07be443d816600856ad6b75e as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/159e6bcc07be443d816600856ad6b75e 2024-12-11T04:26:31,006 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/159e6bcc07be443d816600856ad6b75e, entries=150, sequenceid=116, filesize=11.7 K 2024-12-11T04:26:31,009 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/ffba4a8a19314d82853e7744cdf48037 as 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/ffba4a8a19314d82853e7744cdf48037 2024-12-11T04:26:31,018 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/ffba4a8a19314d82853e7744cdf48037, entries=150, sequenceid=116, filesize=11.7 K 2024-12-11T04:26:31,020 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for c8c23c02526ae28f7a94d562fbd47bb4 in 657ms, sequenceid=116, compaction requested=false 2024-12-11T04:26:31,020 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:31,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on c8c23c02526ae28f7a94d562fbd47bb4 2024-12-11T04:26:31,029 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c8c23c02526ae28f7a94d562fbd47bb4 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-11T04:26:31,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=A 2024-12-11T04:26:31,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:31,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=B 2024-12-11T04:26:31,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:31,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=C 2024-12-11T04:26:31,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:31,050 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/044e916043df46b4ae5086ac84c83527 is 50, key is test_row_0/A:col10/1733891190381/Put/seqid=0 2024-12-11T04:26:31,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-11T04:26:31,057 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:31,058 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-11T04:26:31,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:31,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 
as already flushing 2024-12-11T04:26:31,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:31,058 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:31,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:31,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:31,071 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:31,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891251065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:31,072 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:31,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891251066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:31,074 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:31,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891251067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:31,075 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:31,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891251071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:31,076 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:31,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891251072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:31,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741863_1039 (size=14391) 2024-12-11T04:26:31,090 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/044e916043df46b4ae5086ac84c83527 2024-12-11T04:26:31,114 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/f2fc1de793564afe9f5ba84a05ec08a6 is 50, key is test_row_0/B:col10/1733891190381/Put/seqid=0 2024-12-11T04:26:31,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741864_1040 (size=12051) 2024-12-11T04:26:31,158 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/f2fc1de793564afe9f5ba84a05ec08a6 2024-12-11T04:26:31,176 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:31,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891251176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:31,178 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:31,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891251178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:31,179 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:31,185 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:31,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891251183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:31,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891251178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:31,193 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/71d7c9689fc1413f93ceba775234791d is 50, key is test_row_0/C:col10/1733891190381/Put/seqid=0 2024-12-11T04:26:31,194 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:31,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891251184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:31,212 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:31,212 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-11T04:26:31,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:31,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. as already flushing 2024-12-11T04:26:31,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:31,213 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
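The records above show client Mutate calls being rejected with RegionTooBusyException ("Over memstore limit=512.0 K") while the flush procedure for pid=15 keeps failing because the region is already flushing. As a hedged illustration only, here is a minimal client-side retry loop for that situation using the public HBase client API; the table, row, family and qualifier names are taken from this log, and in practice the HBase client already retries such failures internally (possibly surfacing them wrapped in a retries-exhausted exception), so the explicit loop is purely didactic.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryingPutExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {

            // Row/family/qualifier mirror the cells visible in this log (test_row_0, A:col10).
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            long backoffMs = 100;                       // illustrative starting backoff
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);                     // may be rejected while the memstore is over its blocking limit
                    break;
                } catch (RegionTooBusyException busy) {
                    // The server rejects the write instead of blocking the handler thread;
                    // wait for the in-flight flush to drain the memstore, then try again.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}
```

Exponential backoff gives the in-flight flush (visible in the MemStoreFlusher.0 records) time to bring the memstore back under the blocking limit before the write is retried.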
2024-12-11T04:26:31,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:31,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:31,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741865_1041 (size=12051) 2024-12-11T04:26:31,218 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/71d7c9689fc1413f93ceba775234791d 2024-12-11T04:26:31,230 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/044e916043df46b4ae5086ac84c83527 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/044e916043df46b4ae5086ac84c83527 2024-12-11T04:26:31,241 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/044e916043df46b4ae5086ac84c83527, entries=200, sequenceid=133, filesize=14.1 K 2024-12-11T04:26:31,246 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/f2fc1de793564afe9f5ba84a05ec08a6 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/f2fc1de793564afe9f5ba84a05ec08a6 2024-12-11T04:26:31,259 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/f2fc1de793564afe9f5ba84a05ec08a6, entries=150, sequenceid=133, filesize=11.8 K 2024-12-11T04:26:31,262 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/71d7c9689fc1413f93ceba775234791d as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/71d7c9689fc1413f93ceba775234791d 2024-12-11T04:26:31,271 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/71d7c9689fc1413f93ceba775234791d, entries=150, sequenceid=133, filesize=11.8 K 2024-12-11T04:26:31,273 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for c8c23c02526ae28f7a94d562fbd47bb4 in 244ms, sequenceid=133, compaction requested=true 2024-12-11T04:26:31,273 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:31,273 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c8c23c02526ae28f7a94d562fbd47bb4:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:26:31,274 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:31,274 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c8c23c02526ae28f7a94d562fbd47bb4:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:26:31,274 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:26:31,274 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:26:31,274 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c8c23c02526ae28f7a94d562fbd47bb4:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:26:31,274 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-11T04:26:31,274 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:26:31,276 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38599 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:26:31,276 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): c8c23c02526ae28f7a94d562fbd47bb4/A is initiating minor compaction (all files) 2024-12-11T04:26:31,276 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c8c23c02526ae28f7a94d562fbd47bb4/A in TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 
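The two compaction threads above each report "Selecting compaction from 3 store files" and that the exploring algorithm "selected 3 files of size 38599 ... with 1 in ratio". The following is a simplified, standalone sketch of that ratio test (a candidate set is acceptable when no file is larger than the ratio times the combined size of the others); it is an illustration, not HBase's actual ExploringCompactionPolicy, and 1.2 is assumed here as the usual default for hbase.hstore.compaction.ratio.

```java
import java.util.List;

/** Simplified illustration of the "in ratio" test reported in the compaction logs above.
 *  A candidate set passes when every file is no larger than ratio times the combined
 *  size of the other files. Not HBase's actual ExploringCompactionPolicy. */
public final class RatioCheckSketch {

    static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > (total - size) * ratio) {
                return false;   // one file dominates the selection; skip this permutation
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // The three A-family candidate sizes implied by the log (total 38599 bytes).
        List<Long> candidate = List.of(12207L, 12001L, 14391L);
        System.out.println(filesInRatio(candidate, 1.2));   // true: the files are close in size
    }
}
```

Because the three files are of similar size the set passes the test, which is consistent with all three A-family files being compacted into a single ~12.1 K file a few records later.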
2024-12-11T04:26:31,276 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/326763d2c70e4256abf5ecbbcec31a21, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/0cf6791a0f3342a8b42c9ec46bb31fd5, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/044e916043df46b4ae5086ac84c83527] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp, totalSize=37.7 K 2024-12-11T04:26:31,277 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:26:31,277 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): c8c23c02526ae28f7a94d562fbd47bb4/B is initiating minor compaction (all files) 2024-12-11T04:26:31,277 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c8c23c02526ae28f7a94d562fbd47bb4/B in TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:31,278 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/fa1c6b5eea8e45d4ba5534c373d232ca, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/159e6bcc07be443d816600856ad6b75e, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/f2fc1de793564afe9f5ba84a05ec08a6] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp, totalSize=35.4 K 2024-12-11T04:26:31,278 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 326763d2c70e4256abf5ecbbcec31a21, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1733891189672 2024-12-11T04:26:31,279 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting fa1c6b5eea8e45d4ba5534c373d232ca, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1733891189672 2024-12-11T04:26:31,279 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 0cf6791a0f3342a8b42c9ec46bb31fd5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1733891190019 2024-12-11T04:26:31,280 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 159e6bcc07be443d816600856ad6b75e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1733891190019 2024-12-11T04:26:31,280 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 044e916043df46b4ae5086ac84c83527, keycount=200, bloomtype=ROW, size=14.1 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1733891190371 2024-12-11T04:26:31,282 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting f2fc1de793564afe9f5ba84a05ec08a6, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1733891190377 2024-12-11T04:26:31,314 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c8c23c02526ae28f7a94d562fbd47bb4#B#compaction#27 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:26:31,315 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/7597a582fd33493493f9eddc7c6d9707 is 50, key is test_row_0/B:col10/1733891190381/Put/seqid=0 2024-12-11T04:26:31,331 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c8c23c02526ae28f7a94d562fbd47bb4#A#compaction#28 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:26:31,332 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/e95926f5d5884e9c9cecd155b438ae0e is 50, key is test_row_0/A:col10/1733891190381/Put/seqid=0 2024-12-11T04:26:31,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741866_1042 (size=12359) 2024-12-11T04:26:31,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741867_1043 (size=12359) 2024-12-11T04:26:31,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-11T04:26:31,361 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/7597a582fd33493493f9eddc7c6d9707 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/7597a582fd33493493f9eddc7c6d9707 2024-12-11T04:26:31,368 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:31,368 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-11T04:26:31,369 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/e95926f5d5884e9c9cecd155b438ae0e as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/e95926f5d5884e9c9cecd155b438ae0e 2024-12-11T04:26:31,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:31,369 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing c8c23c02526ae28f7a94d562fbd47bb4 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-11T04:26:31,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=A 2024-12-11T04:26:31,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:31,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=B 2024-12-11T04:26:31,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:31,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=C 2024-12-11T04:26:31,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:31,380 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c8c23c02526ae28f7a94d562fbd47bb4/B of c8c23c02526ae28f7a94d562fbd47bb4 into 7597a582fd33493493f9eddc7c6d9707(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T04:26:31,381 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:31,381 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4., storeName=c8c23c02526ae28f7a94d562fbd47bb4/B, priority=13, startTime=1733891191274; duration=0sec 2024-12-11T04:26:31,381 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:26:31,381 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c8c23c02526ae28f7a94d562fbd47bb4:B 2024-12-11T04:26:31,381 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:26:31,382 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c8c23c02526ae28f7a94d562fbd47bb4/A of c8c23c02526ae28f7a94d562fbd47bb4 into e95926f5d5884e9c9cecd155b438ae0e(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:26:31,382 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:31,382 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4., storeName=c8c23c02526ae28f7a94d562fbd47bb4/A, priority=13, startTime=1733891191273; duration=0sec 2024-12-11T04:26:31,383 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:31,383 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c8c23c02526ae28f7a94d562fbd47bb4:A 2024-12-11T04:26:31,384 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:26:31,384 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): c8c23c02526ae28f7a94d562fbd47bb4/C is initiating minor compaction (all files) 2024-12-11T04:26:31,384 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c8c23c02526ae28f7a94d562fbd47bb4/C in TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 
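The entries that follow return to puts being rejected by HRegion.checkResources with "Over memstore limit=512.0 K". Below is a simplified standalone sketch of that pressure check, assuming only the semantics visible in this log (reject the write and request a flush rather than block the RPC handler); it is not the real HRegion.checkResources, and the 512 K threshold simply mirrors the limit printed in these entries.

```java
import java.util.concurrent.atomic.AtomicLong;

/** Simplified standalone sketch of the memstore pressure check behind the
 *  RegionTooBusyException entries in this log; not HRegion.checkResources itself. */
public final class MemstorePressureSketch {

    static final long BLOCKING_LIMIT = 512 * 1024;   // 512 K, matching the limit printed in the log
    final AtomicLong memstoreSize = new AtomicLong();

    /** Returns true if the write may proceed; false means "region too busy, retry later". */
    boolean tryWrite(long mutationHeapSize) {
        if (memstoreSize.get() > BLOCKING_LIMIT) {
            requestFlush();      // a flush is (re)requested, but the caller is not blocked
            return false;        // mirrors throwing RegionTooBusyException back to the RPC handler
        }
        memstoreSize.addAndGet(mutationHeapSize);
        return true;
    }

    void requestFlush() {
        // In the real server this enqueues work for the flusher; if the region is already
        // flushing the request is a no-op ("NOT flushing ... as already flushing" above).
    }
}
```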
2024-12-11T04:26:31,384 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/689c5752e9a8487dbc9e824cb82034f6, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/ffba4a8a19314d82853e7744cdf48037, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/71d7c9689fc1413f93ceba775234791d] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp, totalSize=35.4 K 2024-12-11T04:26:31,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/afdbaaa4e64e4460a445f2fd084b4262 is 50, key is test_row_0/A:col10/1733891191068/Put/seqid=0 2024-12-11T04:26:31,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on c8c23c02526ae28f7a94d562fbd47bb4 2024-12-11T04:26:31,387 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. as already flushing 2024-12-11T04:26:31,387 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 689c5752e9a8487dbc9e824cb82034f6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1733891189672 2024-12-11T04:26:31,388 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting ffba4a8a19314d82853e7744cdf48037, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1733891190019 2024-12-11T04:26:31,389 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 71d7c9689fc1413f93ceba775234791d, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1733891190377 2024-12-11T04:26:31,412 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:31,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891251404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:31,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741868_1044 (size=12151) 2024-12-11T04:26:31,412 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:31,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891251404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:31,414 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:31,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891251408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:31,415 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:31,415 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:31,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891251410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:31,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891251411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:31,432 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c8c23c02526ae28f7a94d562fbd47bb4#C#compaction#30 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:26:31,433 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/317a3c352bdd4cc0b699b2cb57a1ed5b is 50, key is test_row_0/C:col10/1733891190381/Put/seqid=0 2024-12-11T04:26:31,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741869_1045 (size=12359) 2024-12-11T04:26:31,515 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:31,515 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:31,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891251515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:31,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891251515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:31,522 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:31,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891251518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:31,523 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:31,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891251519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:31,523 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:31,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891251518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:31,721 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:31,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891251719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:31,723 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:31,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891251721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:31,728 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:31,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891251726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:31,729 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:31,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891251728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:31,734 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:31,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891251732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:31,814 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/afdbaaa4e64e4460a445f2fd084b4262 2024-12-11T04:26:31,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/532c620f0c1d44ea805c9592e180e39e is 50, key is test_row_0/B:col10/1733891191068/Put/seqid=0 2024-12-11T04:26:31,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741870_1046 (size=12151) 2024-12-11T04:26:31,851 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/532c620f0c1d44ea805c9592e180e39e 2024-12-11T04:26:31,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-11T04:26:31,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/9a6350ddeaef441a93fa71f41ea459eb is 50, key is test_row_0/C:col10/1733891191068/Put/seqid=0
2024-12-11T04:26:31,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741871_1047 (size=12151)
2024-12-11T04:26:31,893 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/9a6350ddeaef441a93fa71f41ea459eb
2024-12-11T04:26:31,896 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/317a3c352bdd4cc0b699b2cb57a1ed5b as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/317a3c352bdd4cc0b699b2cb57a1ed5b
2024-12-11T04:26:31,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/afdbaaa4e64e4460a445f2fd084b4262 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/afdbaaa4e64e4460a445f2fd084b4262
2024-12-11T04:26:31,910 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c8c23c02526ae28f7a94d562fbd47bb4/C of c8c23c02526ae28f7a94d562fbd47bb4 into 317a3c352bdd4cc0b699b2cb57a1ed5b(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute.
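The RegionTooBusyException responses throughout this log are the region server telling writers to back off until flushes like the one above free memstore space; the HBase client normally retries them internally. The following sketch is purely illustrative and is not part of this test: it makes that backoff explicit for a single put against the TestAcidGuarantees table, with the retry count, sleep times, and written value chosen arbitrarily.

    // Illustrative client-side sketch only. Table and column names mirror the log
    // (TestAcidGuarantees, family A, qualifier col10); everything else is assumed.
    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPutSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Cut the client's internal retries short so the server's pushback
        // surfaces quickly in this sketch (illustrative setting only).
        conf.setInt("hbase.client.retries.number", 1);
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;
          for (int attempt = 0; attempt < 10; attempt++) {
            try {
              table.put(put);   // succeeds once the memstore has drained below its limit
              return;
            } catch (IOException e) {
              // Depending on client settings the RegionTooBusyException may arrive
              // wrapped, so walk the cause chain instead of catching it directly.
              if (!hasRegionTooBusy(e)) {
                throw e;
              }
              Thread.sleep(backoffMs);                  // back off while the region flushes
              backoffMs = Math.min(backoffMs * 2, 5_000);
            }
          }
          throw new IOException("region stayed over its memstore limit");
        }
      }

      private static boolean hasRegionTooBusy(Throwable t) {
        for (Throwable cur = t; cur != null; cur = cur.getCause()) {
          if (cur instanceof RegionTooBusyException) {
            return true;
          }
        }
        return false;
      }
    }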
2024-12-11T04:26:31,911 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c8c23c02526ae28f7a94d562fbd47bb4:
2024-12-11T04:26:31,911 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4., storeName=c8c23c02526ae28f7a94d562fbd47bb4/C, priority=13, startTime=1733891191274; duration=0sec
2024-12-11T04:26:31,912 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-11T04:26:31,912 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c8c23c02526ae28f7a94d562fbd47bb4:C
2024-12-11T04:26:31,915 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/afdbaaa4e64e4460a445f2fd084b4262, entries=150, sequenceid=156, filesize=11.9 K
2024-12-11T04:26:31,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/532c620f0c1d44ea805c9592e180e39e as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/532c620f0c1d44ea805c9592e180e39e
2024-12-11T04:26:31,928 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/532c620f0c1d44ea805c9592e180e39e, entries=150, sequenceid=156, filesize=11.9 K
2024-12-11T04:26:31,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/9a6350ddeaef441a93fa71f41ea459eb as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/9a6350ddeaef441a93fa71f41ea459eb
2024-12-11T04:26:31,948 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/9a6350ddeaef441a93fa71f41ea459eb, entries=150, sequenceid=156, filesize=11.9 K
2024-12-11T04:26:31,951 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for c8c23c02526ae28f7a94d562fbd47bb4 in 582ms, sequenceid=156, compaction requested=false
2024-12-11T04:26:31,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for c8c23c02526ae28f7a94d562fbd47bb4:
2024-12-11T04:26:31,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.
2024-12-11T04:26:31,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15
2024-12-11T04:26:31,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=15
2024-12-11T04:26:31,959 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14
2024-12-11T04:26:31,959 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2060 sec
2024-12-11T04:26:31,963 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 1.2170 sec
2024-12-11T04:26:32,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on c8c23c02526ae28f7a94d562fbd47bb4
2024-12-11T04:26:32,030 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c8c23c02526ae28f7a94d562fbd47bb4 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB
2024-12-11T04:26:32,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=A
2024-12-11T04:26:32,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-11T04:26:32,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=B
2024-12-11T04:26:32,031 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-11T04:26:32,031 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=C
2024-12-11T04:26:32,031 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-11T04:26:32,046 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/375b26247cc44950ac96e74acdff4f11 is 50, key is test_row_0/A:col10/1733891192027/Put/seqid=0
2024-12-11T04:26:32,074 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:32,074 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:32,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891252068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:32,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891252069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:32,076 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:32,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891252073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:32,077 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:32,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891252074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:32,078 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:32,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891252076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:32,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741872_1048 (size=12151) 2024-12-11T04:26:32,177 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:32,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891252176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:32,181 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:32,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891252179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:32,181 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:32,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891252179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:32,182 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:32,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891252180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:32,183 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:32,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891252180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:32,380 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:32,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891252380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:32,386 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:32,386 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:32,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891252384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:32,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891252384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:32,387 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:32,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891252384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:32,389 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:32,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891252388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:32,485 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/375b26247cc44950ac96e74acdff4f11 2024-12-11T04:26:32,500 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/6e5bd8a6d3e64f778ee2b7a18a7eccce is 50, key is test_row_0/B:col10/1733891192027/Put/seqid=0 2024-12-11T04:26:32,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741873_1049 (size=12151) 2024-12-11T04:26:32,516 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/6e5bd8a6d3e64f778ee2b7a18a7eccce 2024-12-11T04:26:32,536 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/d59cd27e729b4510a43cde0b541f6316 is 50, key is test_row_0/C:col10/1733891192027/Put/seqid=0 2024-12-11T04:26:32,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741874_1050 (size=12151) 2024-12-11T04:26:32,584 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/d59cd27e729b4510a43cde0b541f6316 2024-12-11T04:26:32,599 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/375b26247cc44950ac96e74acdff4f11 as 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/375b26247cc44950ac96e74acdff4f11 2024-12-11T04:26:32,613 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/375b26247cc44950ac96e74acdff4f11, entries=150, sequenceid=173, filesize=11.9 K 2024-12-11T04:26:32,617 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/6e5bd8a6d3e64f778ee2b7a18a7eccce as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/6e5bd8a6d3e64f778ee2b7a18a7eccce 2024-12-11T04:26:32,626 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/6e5bd8a6d3e64f778ee2b7a18a7eccce, entries=150, sequenceid=173, filesize=11.9 K 2024-12-11T04:26:32,628 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/d59cd27e729b4510a43cde0b541f6316 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/d59cd27e729b4510a43cde0b541f6316 2024-12-11T04:26:32,637 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/d59cd27e729b4510a43cde0b541f6316, entries=150, sequenceid=173, filesize=11.9 K 2024-12-11T04:26:32,638 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for c8c23c02526ae28f7a94d562fbd47bb4 in 608ms, sequenceid=173, compaction requested=true 2024-12-11T04:26:32,638 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:32,639 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:26:32,640 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36661 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:26:32,640 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): c8c23c02526ae28f7a94d562fbd47bb4/A is initiating minor compaction (all files) 2024-12-11T04:26:32,640 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c8c23c02526ae28f7a94d562fbd47bb4/A in TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 
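The repeated `RegionTooBusyException: Over memstore limit=512.0 K` entries above come from `HRegion.checkResources`: writes to region c8c23c02526ae28f7a94d562fbd47bb4 are rejected whenever its memstore exceeds the blocking threshold (normally `hbase.hregion.memstore.flush.size` multiplied by `hbase.hregion.memstore.block.multiplier`, which this test has evidently tuned down to reach 512 K) until the flush logged by MemStoreFlusher.0 catches up. A minimal client-side sketch of how a writer might back off and retry such puts is shown below; the row and column names mirror the `test_row_0`/`A:col10` keys visible in the log, but the retry count, backoff values and payload are illustrative assumptions, not taken from the test code.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

import java.io.IOException;

public class BackoffPutExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            // Column family "A" and qualifier "col10" mirror the keys seen in the log;
            // the value is an arbitrary placeholder.
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            int attempts = 0;
            long backoffMs = 100;          // illustrative starting backoff
            while (true) {
                try {
                    table.put(put);
                    break;                 // write accepted
                } catch (IOException e) {
                    // RegionTooBusyException (possibly wrapped by the client's own
                    // retry machinery) surfaces as an IOException; back off and retry.
                    if (++attempts >= 5) {
                        throw e;           // give up after a few attempts
                    }
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;        // exponential backoff
                }
            }
        }
    }
}
```

Whether the exception reaches application code directly or already wrapped by the client's internal retries depends on the client retry settings, hence the broad IOException catch in the sketch.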
2024-12-11T04:26:32,641 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/e95926f5d5884e9c9cecd155b438ae0e, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/afdbaaa4e64e4460a445f2fd084b4262, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/375b26247cc44950ac96e74acdff4f11] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp, totalSize=35.8 K 2024-12-11T04:26:32,641 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting e95926f5d5884e9c9cecd155b438ae0e, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1733891190377 2024-12-11T04:26:32,642 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting afdbaaa4e64e4460a445f2fd084b4262, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1733891191065 2024-12-11T04:26:32,643 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 375b26247cc44950ac96e74acdff4f11, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1733891191402 2024-12-11T04:26:32,641 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c8c23c02526ae28f7a94d562fbd47bb4:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:26:32,646 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:32,646 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:26:32,648 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36661 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:26:32,648 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): c8c23c02526ae28f7a94d562fbd47bb4/B is initiating minor compaction (all files) 2024-12-11T04:26:32,648 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c8c23c02526ae28f7a94d562fbd47bb4/B in TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 
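The SortedCompactionPolicy/ExploringCompactionPolicy lines above record the selection itself: 3 eligible files totalling 36661 bytes (the 35.8 K reported for stores A and B), with 16 as the blocking store-file count. A hedged sketch of the configuration keys behind those numbers follows; the values are set explicitly only for illustration and are believed to match the usual defaults rather than anything this test configures.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minimum number of eligible store files before a minor compaction is considered
        // (the policy above selects exactly 3 files).
        conf.setInt("hbase.hstore.compaction.min", 3);
        // Upper bound on how many files a single compaction may include.
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Once a store accumulates this many files, further flushes are blocked
        // ("16 blocking" in the SortedCompactionPolicy lines above).
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        System.out.println("compaction.min = "
            + conf.getInt("hbase.hstore.compaction.min", -1));
    }
}
```

Raising `hbase.hstore.blockingStoreFiles` trades write availability for a larger compaction backlog later; the "16 blocking" figure in the log suggests this test leaves it at the default.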
2024-12-11T04:26:32,648 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/7597a582fd33493493f9eddc7c6d9707, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/532c620f0c1d44ea805c9592e180e39e, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/6e5bd8a6d3e64f778ee2b7a18a7eccce] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp, totalSize=35.8 K 2024-12-11T04:26:32,651 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c8c23c02526ae28f7a94d562fbd47bb4:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:26:32,651 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 7597a582fd33493493f9eddc7c6d9707, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1733891190377 2024-12-11T04:26:32,651 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:32,651 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 532c620f0c1d44ea805c9592e180e39e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1733891191065 2024-12-11T04:26:32,652 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 6e5bd8a6d3e64f778ee2b7a18a7eccce, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1733891191402 2024-12-11T04:26:32,654 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c8c23c02526ae28f7a94d562fbd47bb4:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:26:32,655 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:26:32,671 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c8c23c02526ae28f7a94d562fbd47bb4#A#compaction#36 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:26:32,671 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/d235572b7dd745e49005a63f2c4d7695 is 50, key is test_row_0/A:col10/1733891192027/Put/seqid=0 2024-12-11T04:26:32,682 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c8c23c02526ae28f7a94d562fbd47bb4#B#compaction#37 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:26:32,684 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/e2a5b4b75cd64466ad60d1ae1149f8cc is 50, key is test_row_0/B:col10/1733891192027/Put/seqid=0 2024-12-11T04:26:32,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on c8c23c02526ae28f7a94d562fbd47bb4 2024-12-11T04:26:32,689 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c8c23c02526ae28f7a94d562fbd47bb4 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-11T04:26:32,690 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=A 2024-12-11T04:26:32,690 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:32,690 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=B 2024-12-11T04:26:32,690 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:32,690 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=C 2024-12-11T04:26:32,690 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:32,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741875_1051 (size=12561) 2024-12-11T04:26:32,709 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:32,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891252703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:32,710 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:32,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891252703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:32,711 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:32,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891252704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:32,711 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:32,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891252706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:32,712 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:32,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891252707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:32,715 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/d235572b7dd745e49005a63f2c4d7695 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/d235572b7dd745e49005a63f2c4d7695 2024-12-11T04:26:32,727 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c8c23c02526ae28f7a94d562fbd47bb4/A of c8c23c02526ae28f7a94d562fbd47bb4 into d235572b7dd745e49005a63f2c4d7695(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
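The PressureAwareThroughputController entries ("average throughput is 3.28 MB/second ... slept 0 time(s) ... total limit is 50.00 MB/second") show that the limiter never had to throttle these small compactions. A rough sketch of the related knobs is below; the key names are the ones this controller is understood to read, and the byte values are illustrative choices, not asserted defaults.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Lower and higher bounds (bytes/second) between which the pressure-aware
        // controller scales the allowed compaction write rate as flush pressure rises.
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        System.out.println("lower bound = "
            + conf.getLong("hbase.hstore.compaction.throughput.lower.bound", -1));
    }
}
```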
2024-12-11T04:26:32,727 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:32,727 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4., storeName=c8c23c02526ae28f7a94d562fbd47bb4/A, priority=13, startTime=1733891192639; duration=0sec 2024-12-11T04:26:32,727 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:26:32,728 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c8c23c02526ae28f7a94d562fbd47bb4:A 2024-12-11T04:26:32,728 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:26:32,729 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36661 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:26:32,729 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): c8c23c02526ae28f7a94d562fbd47bb4/C is initiating minor compaction (all files) 2024-12-11T04:26:32,730 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c8c23c02526ae28f7a94d562fbd47bb4/C in TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:32,730 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/317a3c352bdd4cc0b699b2cb57a1ed5b, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/9a6350ddeaef441a93fa71f41ea459eb, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/d59cd27e729b4510a43cde0b541f6316] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp, totalSize=35.8 K 2024-12-11T04:26:32,732 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 317a3c352bdd4cc0b699b2cb57a1ed5b, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1733891190377 2024-12-11T04:26:32,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741876_1052 (size=12561) 2024-12-11T04:26:32,733 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/11e46bba6c954fdf800428e022eec11a is 50, key is test_row_0/A:col10/1733891192064/Put/seqid=0 2024-12-11T04:26:32,734 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
9a6350ddeaef441a93fa71f41ea459eb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1733891191065 2024-12-11T04:26:32,736 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting d59cd27e729b4510a43cde0b541f6316, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1733891191402 2024-12-11T04:26:32,746 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/e2a5b4b75cd64466ad60d1ae1149f8cc as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/e2a5b4b75cd64466ad60d1ae1149f8cc 2024-12-11T04:26:32,758 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c8c23c02526ae28f7a94d562fbd47bb4/B of c8c23c02526ae28f7a94d562fbd47bb4 into e2a5b4b75cd64466ad60d1ae1149f8cc(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:26:32,759 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:32,759 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4., storeName=c8c23c02526ae28f7a94d562fbd47bb4/B, priority=13, startTime=1733891192646; duration=0sec 2024-12-11T04:26:32,759 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:32,759 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c8c23c02526ae28f7a94d562fbd47bb4:B 2024-12-11T04:26:32,778 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c8c23c02526ae28f7a94d562fbd47bb4#C#compaction#39 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:26:32,779 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/07caff6d6cb84bab9406fa236d7d7982 is 50, key is test_row_0/C:col10/1733891192027/Put/seqid=0 2024-12-11T04:26:32,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741877_1053 (size=14541) 2024-12-11T04:26:32,796 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/11e46bba6c954fdf800428e022eec11a 2024-12-11T04:26:32,817 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:32,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891252812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:32,818 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:32,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891252813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:32,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741878_1054 (size=12561) 2024-12-11T04:26:32,821 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:32,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891252814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:32,822 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:32,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891252814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:32,822 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:32,822 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/5907226f93a94c02978330194a5fa9cd is 50, key is test_row_0/B:col10/1733891192064/Put/seqid=0 2024-12-11T04:26:32,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891252814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:32,839 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/07caff6d6cb84bab9406fa236d7d7982 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/07caff6d6cb84bab9406fa236d7d7982 2024-12-11T04:26:32,850 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c8c23c02526ae28f7a94d562fbd47bb4/C of c8c23c02526ae28f7a94d562fbd47bb4 into 07caff6d6cb84bab9406fa236d7d7982(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
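All of the flush and compaction work above is initiated server-side by MemStoreFlusher and the CompactSplit threads; the HBaseAdmin/FlushTableProcedure entries that follow record the same kind of flush being requested from a client. A minimal sketch of such a request against this table (connection settings assumed to come from the hbase-site.xml on the classpath):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ManualFlushCompact {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            admin.flush(table);        // ask the region server(s) to flush memstores to HFiles
            admin.majorCompact(table); // request a compaction of the flushed store files
        }
    }
}
```

The FlushTableProcedure/FlushRegionCallable entries below show what such a request looks like on the master and region server, including the case where it races with a flush that is already in progress ("NOT flushing ... as already flushing").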
2024-12-11T04:26:32,850 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:32,851 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4., storeName=c8c23c02526ae28f7a94d562fbd47bb4/C, priority=13, startTime=1733891192651; duration=0sec 2024-12-11T04:26:32,851 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:32,851 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c8c23c02526ae28f7a94d562fbd47bb4:C 2024-12-11T04:26:32,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-11T04:26:32,857 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-12-11T04:26:32,859 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:26:32,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-12-11T04:26:32,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-11T04:26:32,862 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:26:32,863 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:26:32,863 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:26:32,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741879_1055 (size=12151) 2024-12-11T04:26:32,876 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/5907226f93a94c02978330194a5fa9cd 2024-12-11T04:26:32,898 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/e3daf39b4dd442f88a14e4cd0fafea1b is 50, key is test_row_0/C:col10/1733891192064/Put/seqid=0 2024-12-11T04:26:32,920 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741880_1056 (size=12151) 2024-12-11T04:26:32,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-11T04:26:33,017 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:33,017 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-11T04:26:33,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:33,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. as already flushing 2024-12-11T04:26:33,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:33,018 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:33,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:33,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:33,022 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:33,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891253021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:33,023 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:33,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891253022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:33,024 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:33,024 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:33,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891253023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:33,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891253024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:33,025 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:33,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891253025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:33,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-11T04:26:33,171 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:33,172 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-11T04:26:33,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:33,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. as already flushing 2024-12-11T04:26:33,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:33,172 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:26:33,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:33,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:33,321 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/e3daf39b4dd442f88a14e4cd0fafea1b 2024-12-11T04:26:33,325 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:33,325 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:33,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891253325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:33,326 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-11T04:26:33,326 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:33,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891253325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:33,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:33,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. as already flushing 2024-12-11T04:26:33,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:33,326 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:26:33,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:33,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:33,330 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/11e46bba6c954fdf800428e022eec11a as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/11e46bba6c954fdf800428e022eec11a 2024-12-11T04:26:33,332 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:33,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891253328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:33,333 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:33,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891253329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:33,333 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:33,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891253329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:33,337 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/11e46bba6c954fdf800428e022eec11a, entries=200, sequenceid=196, filesize=14.2 K 2024-12-11T04:26:33,338 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/5907226f93a94c02978330194a5fa9cd as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/5907226f93a94c02978330194a5fa9cd 2024-12-11T04:26:33,347 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/5907226f93a94c02978330194a5fa9cd, entries=150, sequenceid=196, filesize=11.9 K 2024-12-11T04:26:33,348 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/e3daf39b4dd442f88a14e4cd0fafea1b as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/e3daf39b4dd442f88a14e4cd0fafea1b 2024-12-11T04:26:33,355 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/e3daf39b4dd442f88a14e4cd0fafea1b, entries=150, sequenceid=196, filesize=11.9 K 2024-12-11T04:26:33,356 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for c8c23c02526ae28f7a94d562fbd47bb4 in 667ms, sequenceid=196, compaction requested=false 2024-12-11T04:26:33,357 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:33,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-11T04:26:33,480 DEBUG [RSProcedureDispatcher-pool-2 {}] 
master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:33,481 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-11T04:26:33,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:33,481 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing c8c23c02526ae28f7a94d562fbd47bb4 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-11T04:26:33,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=A 2024-12-11T04:26:33,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:33,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=B 2024-12-11T04:26:33,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:33,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=C 2024-12-11T04:26:33,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:33,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/145d047a69dc421ca25c809357504060 is 50, key is test_row_0/A:col10/1733891192702/Put/seqid=0 2024-12-11T04:26:33,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741881_1057 (size=12151) 2024-12-11T04:26:33,519 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/145d047a69dc421ca25c809357504060 2024-12-11T04:26:33,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/b96936c1ca5a4173a7557026da633d8b is 50, key is test_row_0/B:col10/1733891192702/Put/seqid=0 
2024-12-11T04:26:33,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741882_1058 (size=12151) 2024-12-11T04:26:33,571 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/b96936c1ca5a4173a7557026da633d8b 2024-12-11T04:26:33,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/3fda5234be284a16a367612da304acd3 is 50, key is test_row_0/C:col10/1733891192702/Put/seqid=0 2024-12-11T04:26:33,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741883_1059 (size=12151) 2024-12-11T04:26:33,629 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/3fda5234be284a16a367612da304acd3 2024-12-11T04:26:33,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/145d047a69dc421ca25c809357504060 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/145d047a69dc421ca25c809357504060 2024-12-11T04:26:33,682 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/145d047a69dc421ca25c809357504060, entries=150, sequenceid=212, filesize=11.9 K 2024-12-11T04:26:33,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/b96936c1ca5a4173a7557026da633d8b as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/b96936c1ca5a4173a7557026da633d8b 2024-12-11T04:26:33,690 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/b96936c1ca5a4173a7557026da633d8b, entries=150, sequenceid=212, filesize=11.9 K 2024-12-11T04:26:33,691 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/3fda5234be284a16a367612da304acd3 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/3fda5234be284a16a367612da304acd3 2024-12-11T04:26:33,700 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/3fda5234be284a16a367612da304acd3, entries=150, sequenceid=212, filesize=11.9 K 2024-12-11T04:26:33,701 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=0 B/0 for c8c23c02526ae28f7a94d562fbd47bb4 in 220ms, sequenceid=212, compaction requested=true 2024-12-11T04:26:33,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:33,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:33,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-12-11T04:26:33,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-12-11T04:26:33,706 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-12-11T04:26:33,706 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 840 msec 2024-12-11T04:26:33,709 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 848 msec 2024-12-11T04:26:33,846 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c8c23c02526ae28f7a94d562fbd47bb4 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-11T04:26:33,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=A 2024-12-11T04:26:33,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:33,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=B 2024-12-11T04:26:33,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:33,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=C 2024-12-11T04:26:33,848 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:33,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on c8c23c02526ae28f7a94d562fbd47bb4 2024-12-11T04:26:33,858 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/5b5b653c06784bd8b5371dd100dc2905 is 50, key is test_row_0/A:col10/1733891193844/Put/seqid=0 2024-12-11T04:26:33,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741884_1060 (size=9757) 2024-12-11T04:26:33,895 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:33,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891253890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:33,896 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:33,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891253890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:33,896 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:33,896 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:33,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891253892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:33,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891253892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:33,897 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:33,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891253893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:33,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-11T04:26:33,967 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed 2024-12-11T04:26:33,970 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:26:33,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees 2024-12-11T04:26:33,975 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:26:33,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-11T04:26:33,976 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:26:33,976 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:26:33,998 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:33,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891253997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:34,001 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:34,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891253999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:34,001 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:34,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891253999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:34,001 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:34,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891253999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:34,002 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:34,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891254000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:34,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-11T04:26:34,137 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:34,138 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-11T04:26:34,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:34,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. as already flushing 2024-12-11T04:26:34,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:34,138 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:34,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:34,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:34,201 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:34,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891254200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:34,204 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:34,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891254204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:34,205 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:34,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891254205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:34,205 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:34,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891254205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:34,208 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:34,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891254206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:34,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-11T04:26:34,284 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=223 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/5b5b653c06784bd8b5371dd100dc2905 2024-12-11T04:26:34,295 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:34,296 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-11T04:26:34,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:34,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. as already flushing 2024-12-11T04:26:34,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 
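
The block of WARN/DEBUG entries above shows RPC handler threads rejecting Mutate calls with RegionTooBusyException because region c8c23c02526ae28f7a94d562fbd47bb4 is over its 512.0 K memstore blocking limit while a flush is still in progress. As a rough illustration only (not part of this test run), a standalone client could back off and retry such writes along these lines; the table, row, and column family names are taken from the log, the backoff policy is an assumption, and the sketch presumes the server's exception reaches the caller rather than being absorbed by the client's own internal retry logic.

// Hypothetical retry loop for the RegionTooBusyException rejections logged above.
// Assumes client-level retries are disabled so the server's exception reaches this code.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;                    // assumed starting backoff
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);                  // may be rejected while the memstore is over its limit
                    return;                          // write accepted
                } catch (RegionTooBusyException e) {
                    Thread.sleep(backoffMs);         // wait for the in-flight flush to drain the memstore
                    backoffMs *= 2;
                }
            }
            throw new RuntimeException("region still too busy after 5 attempts");
        }
    }
}
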
2024-12-11T04:26:34,297 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:34,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:34,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:34,298 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/33976c29cff548b18b4347c3d665e47f is 50, key is test_row_0/B:col10/1733891193844/Put/seqid=0 2024-12-11T04:26:34,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741885_1061 (size=9757) 2024-12-11T04:26:34,323 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=223 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/33976c29cff548b18b4347c3d665e47f 2024-12-11T04:26:34,338 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/0c83aebe0f1547c0949bc7be6c843cad is 50, key is test_row_0/C:col10/1733891193844/Put/seqid=0 2024-12-11T04:26:34,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741886_1062 (size=9757) 2024-12-11T04:26:34,368 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=223 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/0c83aebe0f1547c0949bc7be6c843cad 2024-12-11T04:26:34,383 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/5b5b653c06784bd8b5371dd100dc2905 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/5b5b653c06784bd8b5371dd100dc2905 2024-12-11T04:26:34,396 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/5b5b653c06784bd8b5371dd100dc2905, entries=100, sequenceid=223, filesize=9.5 K 2024-12-11T04:26:34,399 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/33976c29cff548b18b4347c3d665e47f as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/33976c29cff548b18b4347c3d665e47f 2024-12-11T04:26:34,408 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/33976c29cff548b18b4347c3d665e47f, entries=100, sequenceid=223, filesize=9.5 K 2024-12-11T04:26:34,410 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/0c83aebe0f1547c0949bc7be6c843cad as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/0c83aebe0f1547c0949bc7be6c843cad 2024-12-11T04:26:34,417 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/0c83aebe0f1547c0949bc7be6c843cad, entries=100, sequenceid=223, filesize=9.5 K 2024-12-11T04:26:34,418 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for c8c23c02526ae28f7a94d562fbd47bb4 in 572ms, sequenceid=223, compaction requested=true 2024-12-11T04:26:34,418 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:34,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c8c23c02526ae28f7a94d562fbd47bb4:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:26:34,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:34,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c8c23c02526ae28f7a94d562fbd47bb4:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:26:34,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:34,419 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T04:26:34,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c8c23c02526ae28f7a94d562fbd47bb4:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:26:34,419 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T04:26:34,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:26:34,421 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49010 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T04:26:34,422 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): c8c23c02526ae28f7a94d562fbd47bb4/A is initiating minor compaction (all files) 2024-12-11T04:26:34,422 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c8c23c02526ae28f7a94d562fbd47bb4/A in TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:34,422 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/d235572b7dd745e49005a63f2c4d7695, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/11e46bba6c954fdf800428e022eec11a, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/145d047a69dc421ca25c809357504060, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/5b5b653c06784bd8b5371dd100dc2905] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp, totalSize=47.9 K 2024-12-11T04:26:34,423 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46620 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T04:26:34,423 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): c8c23c02526ae28f7a94d562fbd47bb4/B is initiating minor compaction (all files) 2024-12-11T04:26:34,423 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c8c23c02526ae28f7a94d562fbd47bb4/B in TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 
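
The master-side FlushTableProcedure (pid=18) and the region server's compaction selection above are the same kind of maintenance a client can request explicitly. A minimal sketch, assuming the standard Admin API and the table name from this test; this is an illustration, not code from the test itself.

// Hedged sketch: requesting flush and compaction work via the public Admin API.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompactExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            admin.flush(table);    // schedules a table flush, like the FLUSH operation (procId 16/18) above
            admin.compact(table);  // asks region servers to consider minor compactions of eligible store files
        }
    }
}
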
2024-12-11T04:26:34,424 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/e2a5b4b75cd64466ad60d1ae1149f8cc, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/5907226f93a94c02978330194a5fa9cd, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/b96936c1ca5a4173a7557026da633d8b, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/33976c29cff548b18b4347c3d665e47f] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp, totalSize=45.5 K 2024-12-11T04:26:34,424 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting d235572b7dd745e49005a63f2c4d7695, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1733891191402 2024-12-11T04:26:34,424 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting e2a5b4b75cd64466ad60d1ae1149f8cc, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1733891191402 2024-12-11T04:26:34,425 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 11e46bba6c954fdf800428e022eec11a, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1733891192064 2024-12-11T04:26:34,425 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 5907226f93a94c02978330194a5fa9cd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1733891192064 2024-12-11T04:26:34,426 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 145d047a69dc421ca25c809357504060, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1733891192693 2024-12-11T04:26:34,426 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting b96936c1ca5a4173a7557026da633d8b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1733891192693 2024-12-11T04:26:34,426 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5b5b653c06784bd8b5371dd100dc2905, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=223, earliestPutTs=1733891193844 2024-12-11T04:26:34,427 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 33976c29cff548b18b4347c3d665e47f, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=223, earliestPutTs=1733891193844 2024-12-11T04:26:34,451 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c8c23c02526ae28f7a94d562fbd47bb4#B#compaction#48 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:26:34,452 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/2800ee67287c4385bf1029361bea29f5 is 50, key is test_row_0/B:col10/1733891193844/Put/seqid=0 2024-12-11T04:26:34,453 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:34,454 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-11T04:26:34,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:34,454 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing c8c23c02526ae28f7a94d562fbd47bb4 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-11T04:26:34,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=A 2024-12-11T04:26:34,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:34,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=B 2024-12-11T04:26:34,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:34,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=C 2024-12-11T04:26:34,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:34,471 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c8c23c02526ae28f7a94d562fbd47bb4#A#compaction#49 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:26:34,471 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/9937fd85f70e437786f15cc4a0641ef5 is 50, key is test_row_0/A:col10/1733891193844/Put/seqid=0 2024-12-11T04:26:34,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/ea53ae604e834bcbae3baa8eec4c6cba is 50, key is test_row_0/A:col10/1733891193890/Put/seqid=0 2024-12-11T04:26:34,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on c8c23c02526ae28f7a94d562fbd47bb4 2024-12-11T04:26:34,509 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. as already flushing 2024-12-11T04:26:34,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741888_1064 (size=12697) 2024-12-11T04:26:34,525 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/9937fd85f70e437786f15cc4a0641ef5 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/9937fd85f70e437786f15cc4a0641ef5 2024-12-11T04:26:34,525 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:34,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891254518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:34,526 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:34,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891254523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:34,527 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:34,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891254524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:34,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741887_1063 (size=12697) 2024-12-11T04:26:34,536 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:34,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891254526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:34,537 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:34,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891254527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:34,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741889_1065 (size=12151) 2024-12-11T04:26:34,542 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c8c23c02526ae28f7a94d562fbd47bb4/A of c8c23c02526ae28f7a94d562fbd47bb4 into 9937fd85f70e437786f15cc4a0641ef5(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T04:26:34,542 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:34,542 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4., storeName=c8c23c02526ae28f7a94d562fbd47bb4/A, priority=12, startTime=1733891194419; duration=0sec 2024-12-11T04:26:34,543 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:26:34,543 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c8c23c02526ae28f7a94d562fbd47bb4:A 2024-12-11T04:26:34,543 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T04:26:34,546 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/2800ee67287c4385bf1029361bea29f5 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/2800ee67287c4385bf1029361bea29f5 2024-12-11T04:26:34,547 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46620 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T04:26:34,547 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): c8c23c02526ae28f7a94d562fbd47bb4/C is initiating minor compaction (all files) 2024-12-11T04:26:34,547 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c8c23c02526ae28f7a94d562fbd47bb4/C in TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 
2024-12-11T04:26:34,547 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/07caff6d6cb84bab9406fa236d7d7982, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/e3daf39b4dd442f88a14e4cd0fafea1b, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/3fda5234be284a16a367612da304acd3, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/0c83aebe0f1547c0949bc7be6c843cad] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp, totalSize=45.5 K 2024-12-11T04:26:34,548 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 07caff6d6cb84bab9406fa236d7d7982, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1733891191402 2024-12-11T04:26:34,550 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting e3daf39b4dd442f88a14e4cd0fafea1b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1733891192064 2024-12-11T04:26:34,551 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3fda5234be284a16a367612da304acd3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1733891192693 2024-12-11T04:26:34,551 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0c83aebe0f1547c0949bc7be6c843cad, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=223, earliestPutTs=1733891193844 2024-12-11T04:26:34,556 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c8c23c02526ae28f7a94d562fbd47bb4/B of c8c23c02526ae28f7a94d562fbd47bb4 into 2800ee67287c4385bf1029361bea29f5(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T04:26:34,557 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:34,557 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4., storeName=c8c23c02526ae28f7a94d562fbd47bb4/B, priority=12, startTime=1733891194419; duration=0sec 2024-12-11T04:26:34,557 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:34,557 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c8c23c02526ae28f7a94d562fbd47bb4:B 2024-12-11T04:26:34,567 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c8c23c02526ae28f7a94d562fbd47bb4#C#compaction#51 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:26:34,568 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/8ba2f9317d254ad6bf88fd24aec45c41 is 50, key is test_row_0/C:col10/1733891193844/Put/seqid=0 2024-12-11T04:26:34,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-11T04:26:34,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741890_1066 (size=12697) 2024-12-11T04:26:34,627 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/8ba2f9317d254ad6bf88fd24aec45c41 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/8ba2f9317d254ad6bf88fd24aec45c41 2024-12-11T04:26:34,631 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:34,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891254628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:34,632 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:34,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891254628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:34,636 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:34,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891254633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:34,636 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c8c23c02526ae28f7a94d562fbd47bb4/C of c8c23c02526ae28f7a94d562fbd47bb4 into 8ba2f9317d254ad6bf88fd24aec45c41(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:26:34,637 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:34,637 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4., storeName=c8c23c02526ae28f7a94d562fbd47bb4/C, priority=12, startTime=1733891194419; duration=0sec 2024-12-11T04:26:34,638 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:34,638 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c8c23c02526ae28f7a94d562fbd47bb4:C 2024-12-11T04:26:34,641 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:34,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891254639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:34,641 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:34,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891254638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:34,835 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:34,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891254833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:34,836 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:34,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891254835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:34,839 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:34,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891254839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:34,845 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:34,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891254844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:34,846 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:34,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891254844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:34,941 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/ea53ae604e834bcbae3baa8eec4c6cba 2024-12-11T04:26:34,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/e53f7586497648589b26ae9d3e582266 is 50, key is test_row_0/B:col10/1733891193890/Put/seqid=0 2024-12-11T04:26:35,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741891_1067 (size=12151) 2024-12-11T04:26:35,007 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/e53f7586497648589b26ae9d3e582266 2024-12-11T04:26:35,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/ee899e2620ff4bbbbb16cc42db1c7dd0 is 50, key is test_row_0/C:col10/1733891193890/Put/seqid=0 2024-12-11T04:26:35,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741892_1068 (size=12151) 2024-12-11T04:26:35,049 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/ee899e2620ff4bbbbb16cc42db1c7dd0 2024-12-11T04:26:35,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/ea53ae604e834bcbae3baa8eec4c6cba as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/ea53ae604e834bcbae3baa8eec4c6cba 2024-12-11T04:26:35,069 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/ea53ae604e834bcbae3baa8eec4c6cba, entries=150, sequenceid=248, filesize=11.9 K 2024-12-11T04:26:35,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/e53f7586497648589b26ae9d3e582266 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/e53f7586497648589b26ae9d3e582266 2024-12-11T04:26:35,078 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/e53f7586497648589b26ae9d3e582266, entries=150, sequenceid=248, filesize=11.9 K 2024-12-11T04:26:35,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/ee899e2620ff4bbbbb16cc42db1c7dd0 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/ee899e2620ff4bbbbb16cc42db1c7dd0 2024-12-11T04:26:35,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-11T04:26:35,087 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/ee899e2620ff4bbbbb16cc42db1c7dd0, entries=150, sequenceid=248, filesize=11.9 K 2024-12-11T04:26:35,089 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for c8c23c02526ae28f7a94d562fbd47bb4 in 635ms, sequenceid=248, compaction requested=false 2024-12-11T04:26:35,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:35,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] 
regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:35,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-12-11T04:26:35,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-12-11T04:26:35,094 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-12-11T04:26:35,095 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1150 sec 2024-12-11T04:26:35,098 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 1.1250 sec 2024-12-11T04:26:35,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on c8c23c02526ae28f7a94d562fbd47bb4 2024-12-11T04:26:35,142 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c8c23c02526ae28f7a94d562fbd47bb4 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-11T04:26:35,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=A 2024-12-11T04:26:35,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:35,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=B 2024-12-11T04:26:35,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:35,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=C 2024-12-11T04:26:35,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:35,170 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/3ac4ae70e1844f528c7c3662f9ddbc68 is 50, key is test_row_0/A:col10/1733891195140/Put/seqid=0 2024-12-11T04:26:35,183 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:35,183 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:35,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891255178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:35,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891255177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:35,183 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:35,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891255180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:35,184 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:35,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891255180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:35,185 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:35,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891255182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:35,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741893_1069 (size=14691) 2024-12-11T04:26:35,210 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=265 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/3ac4ae70e1844f528c7c3662f9ddbc68 2024-12-11T04:26:35,230 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/b1b4b4c81a75432190f3c16d2f7201d2 is 50, key is test_row_0/B:col10/1733891195140/Put/seqid=0 2024-12-11T04:26:35,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741894_1070 (size=12301) 2024-12-11T04:26:35,287 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:35,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891255286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:35,288 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:35,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891255287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:35,288 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:35,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891255287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:35,289 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:35,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891255288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:35,290 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:35,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891255289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:35,489 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:35,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891255489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:35,492 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:35,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891255490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:35,493 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:35,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891255491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:35,494 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:35,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891255492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:35,497 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:35,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891255497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:35,667 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=265 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/b1b4b4c81a75432190f3c16d2f7201d2 2024-12-11T04:26:35,681 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/98b4e14201844fa78b86fb315a918b75 is 50, key is test_row_0/C:col10/1733891195140/Put/seqid=0 2024-12-11T04:26:35,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741895_1071 (size=12301) 2024-12-11T04:26:35,792 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:35,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891255791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:35,798 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:35,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891255797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:35,799 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:35,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891255797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:35,801 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:35,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891255799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:35,804 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:35,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891255803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:36,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-11T04:26:36,082 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-12-11T04:26:36,086 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:26:36,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-12-11T04:26:36,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-11T04:26:36,090 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:26:36,091 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:26:36,091 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:26:36,106 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=265 
(bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/98b4e14201844fa78b86fb315a918b75 2024-12-11T04:26:36,115 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/3ac4ae70e1844f528c7c3662f9ddbc68 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/3ac4ae70e1844f528c7c3662f9ddbc68 2024-12-11T04:26:36,122 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/3ac4ae70e1844f528c7c3662f9ddbc68, entries=200, sequenceid=265, filesize=14.3 K 2024-12-11T04:26:36,125 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/b1b4b4c81a75432190f3c16d2f7201d2 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/b1b4b4c81a75432190f3c16d2f7201d2 2024-12-11T04:26:36,131 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/b1b4b4c81a75432190f3c16d2f7201d2, entries=150, sequenceid=265, filesize=12.0 K 2024-12-11T04:26:36,133 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/98b4e14201844fa78b86fb315a918b75 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/98b4e14201844fa78b86fb315a918b75 2024-12-11T04:26:36,139 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/98b4e14201844fa78b86fb315a918b75, entries=150, sequenceid=265, filesize=12.0 K 2024-12-11T04:26:36,141 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for c8c23c02526ae28f7a94d562fbd47bb4 in 1000ms, sequenceid=265, compaction requested=true 2024-12-11T04:26:36,141 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:36,142 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c8c23c02526ae28f7a94d562fbd47bb4:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:26:36,142 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:26:36,142 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction 
requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:36,142 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c8c23c02526ae28f7a94d562fbd47bb4:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:26:36,142 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:36,142 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:26:36,142 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c8c23c02526ae28f7a94d562fbd47bb4:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:26:36,142 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:26:36,143 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:26:36,143 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): c8c23c02526ae28f7a94d562fbd47bb4/B is initiating minor compaction (all files) 2024-12-11T04:26:36,144 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39539 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:26:36,144 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): c8c23c02526ae28f7a94d562fbd47bb4/A is initiating minor compaction (all files) 2024-12-11T04:26:36,144 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c8c23c02526ae28f7a94d562fbd47bb4/A in TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:36,144 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/9937fd85f70e437786f15cc4a0641ef5, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/ea53ae604e834bcbae3baa8eec4c6cba, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/3ac4ae70e1844f528c7c3662f9ddbc68] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp, totalSize=38.6 K 2024-12-11T04:26:36,144 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c8c23c02526ae28f7a94d562fbd47bb4/B in TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 
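The RegionTooBusyException warnings repeated above are raised by HRegion.checkResources whenever a region's memstore grows past its blocking size, which is the region's memstore flush size multiplied by hbase.hregion.memstore.block.multiplier; the 512.0 K limit here indicates the test runs with deliberately small memstore settings. Callers are expected to back off until a flush (such as the one logged by MemStoreFlusher above) drains the region. The following is a minimal sketch of such a client-side writer, assuming a plain HBase Java client: the table, row, family and column names are copied from the log, while the class name, value and retry policy are purely illustrative.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryWriter {

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row/family/qualifier mirror the entries in the log
      // (test_row_0, families A/B/C, column col10); the value is made up.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
      putWithBackoff(table, put, 5, 200L);
    }
  }

  /**
   * Retries a put with exponential backoff while the region reports it is
   * too busy (memstore over its blocking limit). The HBase client has its
   * own retry layer, so the exception may arrive wrapped; the cause chain
   * is therefore inspected instead of catching RegionTooBusyException alone.
   */
  static void putWithBackoff(Table table, Put put, int maxAttempts, long initialSleepMs)
      throws IOException, InterruptedException {
    long sleepMs = initialSleepMs;
    for (int attempt = 1; ; attempt++) {
      try {
        table.put(put);
        return;
      } catch (IOException e) {
        if (!causedByBusyRegion(e) || attempt >= maxAttempts) {
          throw e; // not a busy-region condition, or out of attempts
        }
        Thread.sleep(sleepMs); // give the MemStoreFlusher time to drain the region
        sleepMs *= 2;
      }
    }
  }

  private static boolean causedByBusyRegion(Throwable t) {
    for (Throwable cur = t; cur != null; cur = cur.getCause()) {
      if (cur instanceof RegionTooBusyException) {
        return true;
      }
    }
    return false;
  }
}
```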
2024-12-11T04:26:36,145 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/2800ee67287c4385bf1029361bea29f5, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/e53f7586497648589b26ae9d3e582266, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/b1b4b4c81a75432190f3c16d2f7201d2] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp, totalSize=36.3 K 2024-12-11T04:26:36,145 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 2800ee67287c4385bf1029361bea29f5, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=223, earliestPutTs=1733891192705 2024-12-11T04:26:36,146 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9937fd85f70e437786f15cc4a0641ef5, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=223, earliestPutTs=1733891192705 2024-12-11T04:26:36,146 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting e53f7586497648589b26ae9d3e582266, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1733891193877 2024-12-11T04:26:36,146 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting ea53ae604e834bcbae3baa8eec4c6cba, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1733891193877 2024-12-11T04:26:36,146 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting b1b4b4c81a75432190f3c16d2f7201d2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=265, earliestPutTs=1733891194524 2024-12-11T04:26:36,148 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3ac4ae70e1844f528c7c3662f9ddbc68, keycount=200, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=265, earliestPutTs=1733891194513 2024-12-11T04:26:36,167 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c8c23c02526ae28f7a94d562fbd47bb4#B#compaction#57 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:26:36,167 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/237c97a65cdc4664862af8026c09c323 is 50, key is test_row_0/B:col10/1733891195140/Put/seqid=0 2024-12-11T04:26:36,171 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c8c23c02526ae28f7a94d562fbd47bb4#A#compaction#58 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:26:36,171 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/3a0d793ccff94af197e7b73363376390 is 50, key is test_row_0/A:col10/1733891195140/Put/seqid=0 2024-12-11T04:26:36,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741897_1073 (size=12949) 2024-12-11T04:26:36,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-11T04:26:36,194 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/3a0d793ccff94af197e7b73363376390 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/3a0d793ccff94af197e7b73363376390 2024-12-11T04:26:36,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741896_1072 (size=12949) 2024-12-11T04:26:36,205 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c8c23c02526ae28f7a94d562fbd47bb4/A of c8c23c02526ae28f7a94d562fbd47bb4 into 3a0d793ccff94af197e7b73363376390(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
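The records above trace the full flush/compaction cycle: the master stores a FlushTableProcedure for the client-requested flush of TestAcidGuarantees, MemStoreFlusher writes one HFile per store, and the compaction threads merge the three flushed files per store into a single one. The compactions in this log are system-triggered by the flusher; the sketch below only illustrates how the same flush, and an explicit compaction, could be requested through the public Admin API. It is a minimal sketch under that assumption; the table and family names come from the log, everything else is illustrative.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushAndCompactTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the master to flush every region of the table; in the log this
      // shows up as a FlushTableProcedure driven by the HMaster.
      admin.flush(table);

      // Request a compaction of one column family; the region server picks
      // the eligible store files, as SortedCompactionPolicy /
      // ExploringCompactionPolicy do in the log above.
      admin.compact(table, Bytes.toBytes("A"));

      // A major compaction would instead rewrite all store files of the table:
      // admin.majorCompact(table);
    }
  }
}
```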
2024-12-11T04:26:36,205 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:36,205 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4., storeName=c8c23c02526ae28f7a94d562fbd47bb4/A, priority=13, startTime=1733891196141; duration=0sec 2024-12-11T04:26:36,206 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:26:36,206 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c8c23c02526ae28f7a94d562fbd47bb4:A 2024-12-11T04:26:36,206 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:26:36,207 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:26:36,207 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): c8c23c02526ae28f7a94d562fbd47bb4/C is initiating minor compaction (all files) 2024-12-11T04:26:36,207 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c8c23c02526ae28f7a94d562fbd47bb4/C in TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:36,207 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/8ba2f9317d254ad6bf88fd24aec45c41, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/ee899e2620ff4bbbbb16cc42db1c7dd0, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/98b4e14201844fa78b86fb315a918b75] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp, totalSize=36.3 K 2024-12-11T04:26:36,208 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8ba2f9317d254ad6bf88fd24aec45c41, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=223, earliestPutTs=1733891192705 2024-12-11T04:26:36,208 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting ee899e2620ff4bbbbb16cc42db1c7dd0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1733891193877 2024-12-11T04:26:36,210 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 98b4e14201844fa78b86fb315a918b75, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=265, earliestPutTs=1733891194524 2024-12-11T04:26:36,228 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): c8c23c02526ae28f7a94d562fbd47bb4#C#compaction#59 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:26:36,229 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/cec8f467c8f3471885cfadc09dd073e5 is 50, key is test_row_0/C:col10/1733891195140/Put/seqid=0 2024-12-11T04:26:36,244 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:36,244 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-11T04:26:36,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:36,245 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing c8c23c02526ae28f7a94d562fbd47bb4 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-11T04:26:36,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=A 2024-12-11T04:26:36,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:36,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=B 2024-12-11T04:26:36,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:36,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=C 2024-12-11T04:26:36,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:36,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741898_1074 (size=12949) 2024-12-11T04:26:36,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/97cd757e64b442f48677d0f24449ad75 is 50, key is test_row_0/A:col10/1733891195177/Put/seqid=0 2024-12-11T04:26:36,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741899_1075 
(size=12301) 2024-12-11T04:26:36,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on c8c23c02526ae28f7a94d562fbd47bb4 2024-12-11T04:26:36,299 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. as already flushing 2024-12-11T04:26:36,319 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:36,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891256315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:36,319 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:36,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891256315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:36,321 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:36,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891256317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:36,322 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:36,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891256319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:36,323 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:36,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891256321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:36,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-11T04:26:36,422 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:36,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891256421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:36,423 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:36,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891256423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:36,424 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:36,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891256423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:36,425 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:36,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891256424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:36,428 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:36,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891256427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:36,614 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/237c97a65cdc4664862af8026c09c323 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/237c97a65cdc4664862af8026c09c323 2024-12-11T04:26:36,624 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c8c23c02526ae28f7a94d562fbd47bb4/B of c8c23c02526ae28f7a94d562fbd47bb4 into 237c97a65cdc4664862af8026c09c323(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:26:36,624 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:36,624 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4., storeName=c8c23c02526ae28f7a94d562fbd47bb4/B, priority=13, startTime=1733891196142; duration=0sec 2024-12-11T04:26:36,627 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:36,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891256627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:36,627 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:36,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891256625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:36,628 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:36,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891256627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:36,629 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:36,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891256629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:36,634 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:36,634 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c8c23c02526ae28f7a94d562fbd47bb4:B 2024-12-11T04:26:36,643 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:36,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891256635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:36,656 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/cec8f467c8f3471885cfadc09dd073e5 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/cec8f467c8f3471885cfadc09dd073e5 2024-12-11T04:26:36,665 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c8c23c02526ae28f7a94d562fbd47bb4/C of c8c23c02526ae28f7a94d562fbd47bb4 into cec8f467c8f3471885cfadc09dd073e5(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T04:26:36,665 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:36,665 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4., storeName=c8c23c02526ae28f7a94d562fbd47bb4/C, priority=13, startTime=1733891196142; duration=0sec 2024-12-11T04:26:36,665 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:36,665 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c8c23c02526ae28f7a94d562fbd47bb4:C 2024-12-11T04:26:36,670 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/97cd757e64b442f48677d0f24449ad75 2024-12-11T04:26:36,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/ab165c507ec2465195239969fa29bfd2 is 50, key is test_row_0/B:col10/1733891195177/Put/seqid=0 2024-12-11T04:26:36,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-11T04:26:36,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741900_1076 (size=12301) 2024-12-11T04:26:36,930 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:36,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891256929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:36,931 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:36,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891256931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:36,934 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:36,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891256932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:36,935 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:36,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891256933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:36,947 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:36,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891256946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:37,123 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/ab165c507ec2465195239969fa29bfd2 2024-12-11T04:26:37,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/5448e84d8098494688d4b6a8fd0fbd15 is 50, key is test_row_0/C:col10/1733891195177/Put/seqid=0 2024-12-11T04:26:37,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741901_1077 (size=12301) 2024-12-11T04:26:37,169 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/5448e84d8098494688d4b6a8fd0fbd15 2024-12-11T04:26:37,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/97cd757e64b442f48677d0f24449ad75 as 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/97cd757e64b442f48677d0f24449ad75 2024-12-11T04:26:37,190 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/97cd757e64b442f48677d0f24449ad75, entries=150, sequenceid=288, filesize=12.0 K 2024-12-11T04:26:37,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/ab165c507ec2465195239969fa29bfd2 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/ab165c507ec2465195239969fa29bfd2 2024-12-11T04:26:37,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-11T04:26:37,214 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/ab165c507ec2465195239969fa29bfd2, entries=150, sequenceid=288, filesize=12.0 K 2024-12-11T04:26:37,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/5448e84d8098494688d4b6a8fd0fbd15 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/5448e84d8098494688d4b6a8fd0fbd15 2024-12-11T04:26:37,228 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/5448e84d8098494688d4b6a8fd0fbd15, entries=150, sequenceid=288, filesize=12.0 K 2024-12-11T04:26:37,230 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for c8c23c02526ae28f7a94d562fbd47bb4 in 985ms, sequenceid=288, compaction requested=false 2024-12-11T04:26:37,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:37,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 
2024-12-11T04:26:37,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-12-11T04:26:37,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-12-11T04:26:37,235 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-12-11T04:26:37,235 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1410 sec 2024-12-11T04:26:37,238 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 1.1500 sec 2024-12-11T04:26:37,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on c8c23c02526ae28f7a94d562fbd47bb4 2024-12-11T04:26:37,437 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c8c23c02526ae28f7a94d562fbd47bb4 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-11T04:26:37,437 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=A 2024-12-11T04:26:37,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:37,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=B 2024-12-11T04:26:37,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:37,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=C 2024-12-11T04:26:37,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:37,445 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/62f28cd2a77b46fc9ff95cb0acbda299 is 50, key is test_row_1/A:col10/1733891196316/Put/seqid=0 2024-12-11T04:26:37,473 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:37,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891257467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:37,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741902_1078 (size=9857) 2024-12-11T04:26:37,474 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:37,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891257468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:37,475 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=306 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/62f28cd2a77b46fc9ff95cb0acbda299 2024-12-11T04:26:37,475 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:37,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891257470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:37,476 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:37,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891257472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:37,477 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:37,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891257473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:37,491 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/8a2a5fb2967e454d908a9d0ada9441ca is 50, key is test_row_1/B:col10/1733891196316/Put/seqid=0 2024-12-11T04:26:37,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741903_1079 (size=9857) 2024-12-11T04:26:37,525 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=306 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/8a2a5fb2967e454d908a9d0ada9441ca 2024-12-11T04:26:37,543 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/bcb5b0ed791c40298b8109d3b853d570 is 50, key is test_row_1/C:col10/1733891196316/Put/seqid=0 2024-12-11T04:26:37,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741904_1080 (size=9857) 2024-12-11T04:26:37,554 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=306 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/bcb5b0ed791c40298b8109d3b853d570 2024-12-11T04:26:37,563 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/62f28cd2a77b46fc9ff95cb0acbda299 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/62f28cd2a77b46fc9ff95cb0acbda299 2024-12-11T04:26:37,573 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/62f28cd2a77b46fc9ff95cb0acbda299, entries=100, sequenceid=306, filesize=9.6 K 
2024-12-11T04:26:37,574 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/8a2a5fb2967e454d908a9d0ada9441ca as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/8a2a5fb2967e454d908a9d0ada9441ca 2024-12-11T04:26:37,580 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:37,580 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:37,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891257575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:37,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891257575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:37,581 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:37,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891257578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:37,581 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:37,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891257578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:37,582 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:37,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891257578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:37,593 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/8a2a5fb2967e454d908a9d0ada9441ca, entries=100, sequenceid=306, filesize=9.6 K 2024-12-11T04:26:37,594 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/bcb5b0ed791c40298b8109d3b853d570 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/bcb5b0ed791c40298b8109d3b853d570 2024-12-11T04:26:37,602 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/bcb5b0ed791c40298b8109d3b853d570, entries=100, sequenceid=306, filesize=9.6 K 2024-12-11T04:26:37,604 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for c8c23c02526ae28f7a94d562fbd47bb4 in 166ms, sequenceid=306, compaction requested=true 2024-12-11T04:26:37,604 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:37,604 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c8c23c02526ae28f7a94d562fbd47bb4:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:26:37,604 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:37,604 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:26:37,604 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c8c23c02526ae28f7a94d562fbd47bb4:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:26:37,604 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), 
splitQueue=0 2024-12-11T04:26:37,604 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c8c23c02526ae28f7a94d562fbd47bb4:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:26:37,604 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-11T04:26:37,604 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:26:37,606 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35107 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:26:37,606 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): c8c23c02526ae28f7a94d562fbd47bb4/A is initiating minor compaction (all files) 2024-12-11T04:26:37,606 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c8c23c02526ae28f7a94d562fbd47bb4/A in TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:37,606 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/3a0d793ccff94af197e7b73363376390, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/97cd757e64b442f48677d0f24449ad75, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/62f28cd2a77b46fc9ff95cb0acbda299] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp, totalSize=34.3 K 2024-12-11T04:26:37,606 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35107 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:26:37,607 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): c8c23c02526ae28f7a94d562fbd47bb4/B is initiating minor compaction (all files) 2024-12-11T04:26:37,607 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c8c23c02526ae28f7a94d562fbd47bb4/B in TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 
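The ExploringCompactionPolicy entries above show each store of the region (A, B, C) picking up all three freshly flushed HFiles for a minor compaction once enough files are eligible; the "3 eligible, 16 blocking" figures are the store-file thresholds. Below is a minimal Java sketch of the standard configuration keys that drive this selection, assuming stock default values — the values actually used by this test run are not visible in this excerpt.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
    public static Configuration withCompactionKnobs() {
        Configuration conf = HBaseConfiguration.create();
        // Minimum number of eligible store files before a minor compaction is considered.
        conf.setInt("hbase.hstore.compaction.min", 3);
        // Upper bound on the number of files merged in one minor compaction.
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Store-file count at which further flushes are blocked
        // (the "16 blocking" figure in the log corresponds to this setting's default).
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        // Size ratio used by ExploringCompactionPolicy when scoring candidate file sets.
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        return conf;
    }
}

With fewer eligible files than hbase.hstore.compaction.min, no candidate set is formed and the flushed files simply accumulate until a later flush pushes the count over the threshold.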
2024-12-11T04:26:37,607 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/237c97a65cdc4664862af8026c09c323, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/ab165c507ec2465195239969fa29bfd2, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/8a2a5fb2967e454d908a9d0ada9441ca] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp, totalSize=34.3 K 2024-12-11T04:26:37,607 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 3a0d793ccff94af197e7b73363376390, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=265, earliestPutTs=1733891194524 2024-12-11T04:26:37,608 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 237c97a65cdc4664862af8026c09c323, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=265, earliestPutTs=1733891194524 2024-12-11T04:26:37,608 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 97cd757e64b442f48677d0f24449ad75, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1733891195164 2024-12-11T04:26:37,608 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting ab165c507ec2465195239969fa29bfd2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1733891195164 2024-12-11T04:26:37,609 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8a2a5fb2967e454d908a9d0ada9441ca, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=306, earliestPutTs=1733891196316 2024-12-11T04:26:37,609 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 62f28cd2a77b46fc9ff95cb0acbda299, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=306, earliestPutTs=1733891196316 2024-12-11T04:26:37,623 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c8c23c02526ae28f7a94d562fbd47bb4#B#compaction#66 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:26:37,624 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/5ee4adbb838649be85d1a1441c8b159c is 50, key is test_row_0/B:col10/1733891195177/Put/seqid=0 2024-12-11T04:26:37,631 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c8c23c02526ae28f7a94d562fbd47bb4#A#compaction#67 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:26:37,631 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/22b16a59a935484685592d9b1c734513 is 50, key is test_row_0/A:col10/1733891195177/Put/seqid=0 2024-12-11T04:26:37,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741906_1082 (size=13051) 2024-12-11T04:26:37,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741905_1081 (size=13051) 2024-12-11T04:26:37,673 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/22b16a59a935484685592d9b1c734513 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/22b16a59a935484685592d9b1c734513 2024-12-11T04:26:37,677 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/5ee4adbb838649be85d1a1441c8b159c as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/5ee4adbb838649be85d1a1441c8b159c 2024-12-11T04:26:37,688 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c8c23c02526ae28f7a94d562fbd47bb4/B of c8c23c02526ae28f7a94d562fbd47bb4 into 5ee4adbb838649be85d1a1441c8b159c(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
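The recurring "RegionTooBusyException: Over memstore limit=512.0 K" warnings throughout this section mean the region is rejecting new mutations because its memstore has grown past its blocking size, which is the flush size multiplied by a block multiplier. The sketch below shows the two standard keys involved; the 128 KB flush size is an assumed test-sized value chosen so that 128 KB × 4 = 512 KB matches the limit reported in the log — the configuration actually used by TestAcidGuarantees is not shown in this excerpt.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingSketch {
    public static Configuration withSmallMemstore() {
        Configuration conf = HBaseConfiguration.create();
        // Flush a region's memstore once it reaches this many bytes.
        // 128 KB is an assumed test-sized value; the production default is 128 MB.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        // Block new updates (RegionTooBusyException) once the memstore grows past
        // flush.size * block.multiplier -- here 128 KB * 4 = 512 KB, consistent with
        // the "Over memstore limit=512.0 K" messages in the surrounding log entries.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        return conf;
    }
}

Once the flush visible in the surrounding entries completes and the memstore drains back under the limit, the blocked mutations succeed on retry, which is why the warnings come in bursts around each flush.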
2024-12-11T04:26:37,688 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:37,688 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4., storeName=c8c23c02526ae28f7a94d562fbd47bb4/B, priority=13, startTime=1733891197604; duration=0sec 2024-12-11T04:26:37,689 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:26:37,689 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c8c23c02526ae28f7a94d562fbd47bb4:B 2024-12-11T04:26:37,689 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:26:37,690 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35107 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:26:37,691 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): c8c23c02526ae28f7a94d562fbd47bb4/C is initiating minor compaction (all files) 2024-12-11T04:26:37,691 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c8c23c02526ae28f7a94d562fbd47bb4/C in TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:37,691 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/cec8f467c8f3471885cfadc09dd073e5, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/5448e84d8098494688d4b6a8fd0fbd15, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/bcb5b0ed791c40298b8109d3b853d570] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp, totalSize=34.3 K 2024-12-11T04:26:37,691 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting cec8f467c8f3471885cfadc09dd073e5, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=265, earliestPutTs=1733891194524 2024-12-11T04:26:37,692 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5448e84d8098494688d4b6a8fd0fbd15, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1733891195164 2024-12-11T04:26:37,692 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c8c23c02526ae28f7a94d562fbd47bb4/A of c8c23c02526ae28f7a94d562fbd47bb4 into 22b16a59a935484685592d9b1c734513(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T04:26:37,692 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:37,693 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting bcb5b0ed791c40298b8109d3b853d570, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=306, earliestPutTs=1733891196316 2024-12-11T04:26:37,693 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4., storeName=c8c23c02526ae28f7a94d562fbd47bb4/A, priority=13, startTime=1733891197604; duration=0sec 2024-12-11T04:26:37,693 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:37,693 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c8c23c02526ae28f7a94d562fbd47bb4:A 2024-12-11T04:26:37,703 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c8c23c02526ae28f7a94d562fbd47bb4#C#compaction#68 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:26:37,704 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/e5e2af57af4f4843aeb06204980fbe72 is 50, key is test_row_0/C:col10/1733891195177/Put/seqid=0 2024-12-11T04:26:37,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741907_1083 (size=13051) 2024-12-11T04:26:37,733 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/e5e2af57af4f4843aeb06204980fbe72 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/e5e2af57af4f4843aeb06204980fbe72 2024-12-11T04:26:37,742 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c8c23c02526ae28f7a94d562fbd47bb4/C of c8c23c02526ae28f7a94d562fbd47bb4 into e5e2af57af4f4843aeb06204980fbe72(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
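The rejected calls in the ipc.CallRunner entries are ordinary client Mutate (put) requests against TestAcidGuarantees. The following is a hedged sketch of what such a write looks like with the standard HBase client API, reusing the table, row, family and qualifier names that appear in the log; the cell value and the explicit backoff loop are illustrative assumptions — in practice the stock client retries RegionTooBusyException internally and may surface it wrapped in a retries-exhausted exception rather than directly.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            // Row, family and qualifier names taken from the log; the value is a placeholder.
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            // Retry with a simple backoff if the region reports it is over its memstore
            // blocking limit. The explicit catch is only to make the behaviour visible;
            // the client library performs its own retries before giving up.
            for (int attempt = 0; attempt < 5; attempt++) {
                try {
                    table.put(put);
                    break;
                } catch (RegionTooBusyException busy) {
                    Thread.sleep(100L * (attempt + 1));
                }
            }
        }
    }
}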
2024-12-11T04:26:37,743 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:37,743 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4., storeName=c8c23c02526ae28f7a94d562fbd47bb4/C, priority=13, startTime=1733891197604; duration=0sec 2024-12-11T04:26:37,743 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:37,743 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c8c23c02526ae28f7a94d562fbd47bb4:C 2024-12-11T04:26:37,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on c8c23c02526ae28f7a94d562fbd47bb4 2024-12-11T04:26:37,786 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c8c23c02526ae28f7a94d562fbd47bb4 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-11T04:26:37,787 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=A 2024-12-11T04:26:37,787 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:37,787 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=B 2024-12-11T04:26:37,787 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:37,787 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=C 2024-12-11T04:26:37,787 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:37,803 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/3546ec8a45fd4d92ba70990e363e0bda is 50, key is test_row_0/A:col10/1733891197784/Put/seqid=0 2024-12-11T04:26:37,808 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:37,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891257805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:37,810 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:37,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891257807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:37,811 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:37,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891257808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:37,812 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:37,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891257808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:37,814 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:37,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891257814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:37,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741908_1084 (size=17181) 2024-12-11T04:26:37,821 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/3546ec8a45fd4d92ba70990e363e0bda 2024-12-11T04:26:37,839 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/ac87311c50d647a2b730e854e6d41afe is 50, key is test_row_0/B:col10/1733891197784/Put/seqid=0 2024-12-11T04:26:37,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741909_1085 (size=12301) 2024-12-11T04:26:37,872 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/ac87311c50d647a2b730e854e6d41afe 2024-12-11T04:26:37,887 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/8d0e9c0e9e3a4674b2543b623bbcdd96 is 50, key is test_row_0/C:col10/1733891197784/Put/seqid=0 2024-12-11T04:26:37,894 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741910_1086 (size=12301) 2024-12-11T04:26:37,899 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/8d0e9c0e9e3a4674b2543b623bbcdd96 2024-12-11T04:26:37,906 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/3546ec8a45fd4d92ba70990e363e0bda as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/3546ec8a45fd4d92ba70990e363e0bda 2024-12-11T04:26:37,912 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:37,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891257911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:37,914 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:37,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891257913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:37,917 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:37,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891257916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:37,918 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:37,918 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:37,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891257917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:37,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891257917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:37,920 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/3546ec8a45fd4d92ba70990e363e0bda, entries=250, sequenceid=332, filesize=16.8 K 2024-12-11T04:26:37,921 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/ac87311c50d647a2b730e854e6d41afe as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/ac87311c50d647a2b730e854e6d41afe 2024-12-11T04:26:37,927 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/ac87311c50d647a2b730e854e6d41afe, entries=150, sequenceid=332, filesize=12.0 K 2024-12-11T04:26:37,930 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/8d0e9c0e9e3a4674b2543b623bbcdd96 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/8d0e9c0e9e3a4674b2543b623bbcdd96 2024-12-11T04:26:37,937 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/8d0e9c0e9e3a4674b2543b623bbcdd96, entries=150, sequenceid=332, filesize=12.0 K 2024-12-11T04:26:37,938 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for c8c23c02526ae28f7a94d562fbd47bb4 in 152ms, 
sequenceid=332, compaction requested=false 2024-12-11T04:26:37,938 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:38,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on c8c23c02526ae28f7a94d562fbd47bb4 2024-12-11T04:26:38,116 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c8c23c02526ae28f7a94d562fbd47bb4 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-11T04:26:38,116 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=A 2024-12-11T04:26:38,117 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:38,117 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=B 2024-12-11T04:26:38,117 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:38,117 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=C 2024-12-11T04:26:38,117 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:38,126 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/5d32d2af5b324167b6459b5c85a66eb0 is 50, key is test_row_0/A:col10/1733891197805/Put/seqid=0 2024-12-11T04:26:38,145 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:38,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891258140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:38,145 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:38,146 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:38,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891258142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:38,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891258141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:38,147 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:38,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891258145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:38,149 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:38,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891258146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:38,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741911_1087 (size=12301) 2024-12-11T04:26:38,156 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=347 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/5d32d2af5b324167b6459b5c85a66eb0 2024-12-11T04:26:38,169 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/e438cc89ec6f4d1baeca3e1c344fffa2 is 50, key is test_row_0/B:col10/1733891197805/Put/seqid=0 2024-12-11T04:26:38,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741912_1088 (size=12301) 2024-12-11T04:26:38,178 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=347 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/e438cc89ec6f4d1baeca3e1c344fffa2 2024-12-11T04:26:38,193 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/0c4fff1e9ce14f498eca8b0eb382abd2 is 50, key is test_row_0/C:col10/1733891197805/Put/seqid=0 2024-12-11T04:26:38,216 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-11T04:26:38,216 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed 2024-12-11T04:26:38,218 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:26:38,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees 2024-12-11T04:26:38,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-11T04:26:38,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741913_1089 (size=12301) 2024-12-11T04:26:38,225 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=347 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/0c4fff1e9ce14f498eca8b0eb382abd2 2024-12-11T04:26:38,225 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:26:38,227 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:26:38,227 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:26:38,238 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/5d32d2af5b324167b6459b5c85a66eb0 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/5d32d2af5b324167b6459b5c85a66eb0 2024-12-11T04:26:38,248 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/5d32d2af5b324167b6459b5c85a66eb0, entries=150, sequenceid=347, filesize=12.0 K 2024-12-11T04:26:38,249 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:38,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891258248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:38,250 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/e438cc89ec6f4d1baeca3e1c344fffa2 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/e438cc89ec6f4d1baeca3e1c344fffa2 2024-12-11T04:26:38,250 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:38,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891258248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:38,251 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:38,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891258248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:38,251 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:38,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891258249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:38,252 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:38,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891258250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:38,257 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/e438cc89ec6f4d1baeca3e1c344fffa2, entries=150, sequenceid=347, filesize=12.0 K 2024-12-11T04:26:38,258 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/0c4fff1e9ce14f498eca8b0eb382abd2 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/0c4fff1e9ce14f498eca8b0eb382abd2 2024-12-11T04:26:38,264 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/0c4fff1e9ce14f498eca8b0eb382abd2, entries=150, sequenceid=347, filesize=12.0 K 2024-12-11T04:26:38,266 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for c8c23c02526ae28f7a94d562fbd47bb4 in 150ms, sequenceid=347, compaction requested=true 2024-12-11T04:26:38,266 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:38,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c8c23c02526ae28f7a94d562fbd47bb4:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:26:38,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:38,266 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:26:38,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c8c23c02526ae28f7a94d562fbd47bb4:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:26:38,266 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 
2024-12-11T04:26:38,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:38,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c8c23c02526ae28f7a94d562fbd47bb4:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:26:38,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:26:38,268 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:26:38,268 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42533 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:26:38,268 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): c8c23c02526ae28f7a94d562fbd47bb4/A is initiating minor compaction (all files) 2024-12-11T04:26:38,268 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): c8c23c02526ae28f7a94d562fbd47bb4/B is initiating minor compaction (all files) 2024-12-11T04:26:38,268 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c8c23c02526ae28f7a94d562fbd47bb4/A in TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:38,269 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c8c23c02526ae28f7a94d562fbd47bb4/B in TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 
2024-12-11T04:26:38,269 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/22b16a59a935484685592d9b1c734513, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/3546ec8a45fd4d92ba70990e363e0bda, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/5d32d2af5b324167b6459b5c85a66eb0] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp, totalSize=41.5 K 2024-12-11T04:26:38,269 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/5ee4adbb838649be85d1a1441c8b159c, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/ac87311c50d647a2b730e854e6d41afe, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/e438cc89ec6f4d1baeca3e1c344fffa2] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp, totalSize=36.8 K 2024-12-11T04:26:38,269 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 22b16a59a935484685592d9b1c734513, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=306, earliestPutTs=1733891195177 2024-12-11T04:26:38,269 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 5ee4adbb838649be85d1a1441c8b159c, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=306, earliestPutTs=1733891195177 2024-12-11T04:26:38,270 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3546ec8a45fd4d92ba70990e363e0bda, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1733891197469 2024-12-11T04:26:38,270 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting ac87311c50d647a2b730e854e6d41afe, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1733891197471 2024-12-11T04:26:38,270 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5d32d2af5b324167b6459b5c85a66eb0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=347, earliestPutTs=1733891197805 2024-12-11T04:26:38,271 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting e438cc89ec6f4d1baeca3e1c344fffa2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=347, earliestPutTs=1733891197805 2024-12-11T04:26:38,290 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c8c23c02526ae28f7a94d562fbd47bb4#A#compaction#75 average throughput is 3.28 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:26:38,291 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/ac229882ab8548459e7def2c7347b63d is 50, key is test_row_0/A:col10/1733891197805/Put/seqid=0 2024-12-11T04:26:38,293 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c8c23c02526ae28f7a94d562fbd47bb4#B#compaction#76 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:26:38,294 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/3bdf2925d66e45ff9c71811aa2f8ade1 is 50, key is test_row_0/B:col10/1733891197805/Put/seqid=0 2024-12-11T04:26:38,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-11T04:26:38,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741914_1090 (size=13153) 2024-12-11T04:26:38,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741915_1091 (size=13153) 2024-12-11T04:26:38,348 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/3bdf2925d66e45ff9c71811aa2f8ade1 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/3bdf2925d66e45ff9c71811aa2f8ade1 2024-12-11T04:26:38,359 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/ac229882ab8548459e7def2c7347b63d as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/ac229882ab8548459e7def2c7347b63d 2024-12-11T04:26:38,359 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c8c23c02526ae28f7a94d562fbd47bb4/B of c8c23c02526ae28f7a94d562fbd47bb4 into 3bdf2925d66e45ff9c71811aa2f8ade1(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T04:26:38,359 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:38,359 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4., storeName=c8c23c02526ae28f7a94d562fbd47bb4/B, priority=13, startTime=1733891198266; duration=0sec 2024-12-11T04:26:38,359 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:26:38,359 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c8c23c02526ae28f7a94d562fbd47bb4:B 2024-12-11T04:26:38,359 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:26:38,361 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:26:38,362 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): c8c23c02526ae28f7a94d562fbd47bb4/C is initiating minor compaction (all files) 2024-12-11T04:26:38,362 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c8c23c02526ae28f7a94d562fbd47bb4/C in TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:38,362 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/e5e2af57af4f4843aeb06204980fbe72, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/8d0e9c0e9e3a4674b2543b623bbcdd96, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/0c4fff1e9ce14f498eca8b0eb382abd2] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp, totalSize=36.8 K 2024-12-11T04:26:38,363 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting e5e2af57af4f4843aeb06204980fbe72, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=306, earliestPutTs=1733891195177 2024-12-11T04:26:38,364 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 8d0e9c0e9e3a4674b2543b623bbcdd96, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1733891197471 2024-12-11T04:26:38,365 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 0c4fff1e9ce14f498eca8b0eb382abd2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=347, earliestPutTs=1733891197805 2024-12-11T04:26:38,367 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 
(all) file(s) in c8c23c02526ae28f7a94d562fbd47bb4/A of c8c23c02526ae28f7a94d562fbd47bb4 into ac229882ab8548459e7def2c7347b63d(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:26:38,367 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:38,367 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4., storeName=c8c23c02526ae28f7a94d562fbd47bb4/A, priority=13, startTime=1733891198266; duration=0sec 2024-12-11T04:26:38,368 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:38,368 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c8c23c02526ae28f7a94d562fbd47bb4:A 2024-12-11T04:26:38,380 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:38,381 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-11T04:26:38,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:38,382 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2837): Flushing c8c23c02526ae28f7a94d562fbd47bb4 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-11T04:26:38,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=A 2024-12-11T04:26:38,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:38,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=B 2024-12-11T04:26:38,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:38,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=C 2024-12-11T04:26:38,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:38,383 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c8c23c02526ae28f7a94d562fbd47bb4#C#compaction#77 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:26:38,386 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/6b43b8dfb2574000a6028092a880cf7b is 50, key is test_row_0/C:col10/1733891197805/Put/seqid=0 2024-12-11T04:26:38,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/f053ff63bc734d38a23eddf1ed5933b7 is 50, key is test_row_0/A:col10/1733891198141/Put/seqid=0 2024-12-11T04:26:38,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741917_1093 (size=12301) 2024-12-11T04:26:38,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741916_1092 (size=13153) 2024-12-11T04:26:38,441 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=371 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/f053ff63bc734d38a23eddf1ed5933b7 2024-12-11T04:26:38,455 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. as already flushing 2024-12-11T04:26:38,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on c8c23c02526ae28f7a94d562fbd47bb4 2024-12-11T04:26:38,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/4515034585ea416faf60217a34da1b1e is 50, key is test_row_0/B:col10/1733891198141/Put/seqid=0 2024-12-11T04:26:38,468 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:38,468 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:38,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891258465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:38,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891258463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:38,469 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:38,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891258466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:38,471 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:38,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891258468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:38,471 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:38,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891258468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:38,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741918_1094 (size=12301) 2024-12-11T04:26:38,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-11T04:26:38,571 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:38,571 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:38,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891258569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:38,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891258570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:38,572 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:38,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891258571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:38,574 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:38,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891258573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:38,580 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:38,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891258579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:38,773 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:38,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891258773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:38,774 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:38,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891258774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:38,775 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:38,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891258774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:38,778 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:38,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891258775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:38,784 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:38,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891258782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:38,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-11T04:26:38,849 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/6b43b8dfb2574000a6028092a880cf7b as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/6b43b8dfb2574000a6028092a880cf7b 2024-12-11T04:26:38,854 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c8c23c02526ae28f7a94d562fbd47bb4/C of c8c23c02526ae28f7a94d562fbd47bb4 into 6b43b8dfb2574000a6028092a880cf7b(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
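The repeated RegionTooBusyException responses above are the region blocking writers once its memstore passes the blocking threshold; the stock HBase client treats this as a retryable error and backs off internally before it ever surfaces to the application. Purely as an illustration of that contract, and not code from this test, a minimal application-level sketch against the standard 2.x client API, with the table, family, and row names borrowed from this log, might look like:

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
  // Retry a single Put when the region reports it is over its memstore limit,
  // sleeping between attempts so the flusher has time to drain the memstore.
  static void putWithBackoff(Table table, Put put) throws IOException, InterruptedException {
    int attempt = 0;
    while (true) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException e) { // "Over memstore limit=..." on the server side
        if (++attempt > 10) {
          throw e;
        }
        Thread.sleep(100L * attempt); // simple linear backoff
      }
    }
  }

  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      putWithBackoff(table, put);
    }
  }
}

Each retried mutation reaches the server as a fresh call, which is consistent with the rising callId values per connection in the entries above.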
2024-12-11T04:26:38,855 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:38,855 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4., storeName=c8c23c02526ae28f7a94d562fbd47bb4/C, priority=13, startTime=1733891198267; duration=0sec 2024-12-11T04:26:38,855 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:38,855 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c8c23c02526ae28f7a94d562fbd47bb4:C 2024-12-11T04:26:38,883 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=371 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/4515034585ea416faf60217a34da1b1e 2024-12-11T04:26:38,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/6a933c73acfa41faaa1d609d1d2664f5 is 50, key is test_row_0/C:col10/1733891198141/Put/seqid=0 2024-12-11T04:26:38,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741919_1095 (size=12301) 2024-12-11T04:26:38,912 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=371 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/6a933c73acfa41faaa1d609d1d2664f5 2024-12-11T04:26:38,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/f053ff63bc734d38a23eddf1ed5933b7 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/f053ff63bc734d38a23eddf1ed5933b7 2024-12-11T04:26:38,938 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/f053ff63bc734d38a23eddf1ed5933b7, entries=150, sequenceid=371, filesize=12.0 K 2024-12-11T04:26:38,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/4515034585ea416faf60217a34da1b1e as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/4515034585ea416faf60217a34da1b1e 2024-12-11T04:26:38,957 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/4515034585ea416faf60217a34da1b1e, entries=150, sequenceid=371, filesize=12.0 K 2024-12-11T04:26:38,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/6a933c73acfa41faaa1d609d1d2664f5 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/6a933c73acfa41faaa1d609d1d2664f5 2024-12-11T04:26:38,970 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/6a933c73acfa41faaa1d609d1d2664f5, entries=150, sequenceid=371, filesize=12.0 K 2024-12-11T04:26:38,971 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for c8c23c02526ae28f7a94d562fbd47bb4 in 590ms, sequenceid=371, compaction requested=false 2024-12-11T04:26:38,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2538): Flush status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:38,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 
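The 512.0 K figure in those rejections is not a hard-coded constant: a region starts throwing RegionTooBusyException once its memstore exceeds hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The exact values this test configures are not visible in the log; a 128 K flush size with the default multiplier of 4 is one combination consistent with the 512 K limit, sketched below for illustration only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed test-sized values; production defaults are 128 MB and 4.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    // 128 K * 4 = 512 K, matching "Over memstore limit=512.0 K" in the log above.
    System.out.println("writes block above " + blockingLimit + " bytes per region");
  }
}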
2024-12-11T04:26:38,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=23 2024-12-11T04:26:38,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=23 2024-12-11T04:26:38,976 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22 2024-12-11T04:26:38,976 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 746 msec 2024-12-11T04:26:38,979 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees in 759 msec 2024-12-11T04:26:39,080 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c8c23c02526ae28f7a94d562fbd47bb4 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-11T04:26:39,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on c8c23c02526ae28f7a94d562fbd47bb4 2024-12-11T04:26:39,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=A 2024-12-11T04:26:39,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:39,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=B 2024-12-11T04:26:39,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:39,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=C 2024-12-11T04:26:39,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:39,090 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/370e5e22c8c449a6ac990be06388e217 is 50, key is test_row_0/A:col10/1733891198465/Put/seqid=0 2024-12-11T04:26:39,111 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:39,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891259105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:39,112 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:39,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891259105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:39,112 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:39,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891259107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:39,114 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:39,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891259110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:39,114 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:39,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891259111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:39,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741920_1096 (size=17181) 2024-12-11T04:26:39,122 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=388 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/370e5e22c8c449a6ac990be06388e217 2024-12-11T04:26:39,134 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/8f02a464fab34f33a9d295ad3168ebc1 is 50, key is test_row_0/B:col10/1733891198465/Put/seqid=0 2024-12-11T04:26:39,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741921_1097 (size=12301) 2024-12-11T04:26:39,158 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=388 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/8f02a464fab34f33a9d295ad3168ebc1 2024-12-11T04:26:39,183 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/44ef559838f64cdda3a09b98ea5c8728 is 50, key is test_row_0/C:col10/1733891198465/Put/seqid=0 2024-12-11T04:26:39,214 WARN 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:39,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891259214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:39,215 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:39,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891259214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:39,218 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:39,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891259215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:39,221 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:39,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891259217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:39,221 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:39,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891259217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:39,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741922_1098 (size=12301) 2024-12-11T04:26:39,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-11T04:26:39,325 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed 2024-12-11T04:26:39,326 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:26:39,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees 2024-12-11T04:26:39,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-11T04:26:39,328 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:26:39,329 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:26:39,329 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:26:39,424 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:39,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891259419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:39,425 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:39,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891259422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:39,426 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:39,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891259423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:39,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-11T04:26:39,430 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:39,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891259428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:39,433 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:39,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891259431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:39,482 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:39,483 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-11T04:26:39,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 
2024-12-11T04:26:39,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. as already flushing 2024-12-11T04:26:39,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:39,484 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:39,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:26:39,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
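The pid=24/25 exchange above is the server side of a client-requested table flush: the master runs a FlushTableProcedure, dispatches a FlushRegionProcedure to the region server, and the callable is rejected with "Unable to complete flush" while the region is already flushing, after which the master re-dispatches it. From the client, the whole exchange is a single call; a minimal sketch against the standard 2.x Admin API, not taken from this test's source, is:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Blocks until the master reports the flush procedure complete, which is what
      // the repeated "Checking to see if procedure is done pid=24" polling above reflects.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}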
2024-12-11T04:26:39,628 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=388 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/44ef559838f64cdda3a09b98ea5c8728 2024-12-11T04:26:39,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-11T04:26:39,638 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:39,638 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-11T04:26:39,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:39,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. as already flushing 2024-12-11T04:26:39,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:39,639 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:39,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:39,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:26:39,643 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/370e5e22c8c449a6ac990be06388e217 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/370e5e22c8c449a6ac990be06388e217 2024-12-11T04:26:39,651 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/370e5e22c8c449a6ac990be06388e217, entries=250, sequenceid=388, filesize=16.8 K 2024-12-11T04:26:39,653 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/8f02a464fab34f33a9d295ad3168ebc1 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/8f02a464fab34f33a9d295ad3168ebc1 2024-12-11T04:26:39,660 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/8f02a464fab34f33a9d295ad3168ebc1, entries=150, sequenceid=388, filesize=12.0 K 2024-12-11T04:26:39,661 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/44ef559838f64cdda3a09b98ea5c8728 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/44ef559838f64cdda3a09b98ea5c8728 2024-12-11T04:26:39,669 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/44ef559838f64cdda3a09b98ea5c8728, entries=150, sequenceid=388, filesize=12.0 K 2024-12-11T04:26:39,670 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for c8c23c02526ae28f7a94d562fbd47bb4 in 590ms, sequenceid=388, compaction requested=true 2024-12-11T04:26:39,670 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:39,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c8c23c02526ae28f7a94d562fbd47bb4:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:26:39,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:39,671 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:26:39,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact 
mark for store c8c23c02526ae28f7a94d562fbd47bb4:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:26:39,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:39,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c8c23c02526ae28f7a94d562fbd47bb4:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:26:39,671 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:26:39,671 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:26:39,672 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42635 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:26:39,672 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:26:39,672 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): c8c23c02526ae28f7a94d562fbd47bb4/B is initiating minor compaction (all files) 2024-12-11T04:26:39,672 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): c8c23c02526ae28f7a94d562fbd47bb4/A is initiating minor compaction (all files) 2024-12-11T04:26:39,673 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c8c23c02526ae28f7a94d562fbd47bb4/A in TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:39,673 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c8c23c02526ae28f7a94d562fbd47bb4/B in TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 
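The MemStoreFlusher entries above show each flushed hfile being written under the region's .tmp directory and then committed into its column-family directory (A, B, C) before being added to the store. As a rough, simplified illustration of that commit step (this is not HBase's HRegionFileSystem code; a plain filesystem rename stands in for it), using the A-family paths that appear verbatim in the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CommitTmpHFileSketch {
  public static void main(String[] args) throws Exception {
    // Paths copied from the log above: the finished flush output sits under .tmp
    // and is moved into the A family directory to become a live store file.
    Path tmp = new Path("hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/370e5e22c8c449a6ac990be06388e217");
    Path dst = new Path("hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/370e5e22c8c449a6ac990be06388e217");
    Configuration conf = new Configuration();
    FileSystem fs = tmp.getFileSystem(conf);
    if (!fs.rename(tmp, dst)) {
      throw new java.io.IOException("Commit failed: " + tmp + " -> " + dst);
    }
  }
}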
2024-12-11T04:26:39,673 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/ac229882ab8548459e7def2c7347b63d, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/f053ff63bc734d38a23eddf1ed5933b7, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/370e5e22c8c449a6ac990be06388e217] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp, totalSize=41.6 K 2024-12-11T04:26:39,673 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/3bdf2925d66e45ff9c71811aa2f8ade1, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/4515034585ea416faf60217a34da1b1e, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/8f02a464fab34f33a9d295ad3168ebc1] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp, totalSize=36.9 K 2024-12-11T04:26:39,673 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting ac229882ab8548459e7def2c7347b63d, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=347, earliestPutTs=1733891197805 2024-12-11T04:26:39,673 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 3bdf2925d66e45ff9c71811aa2f8ade1, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=347, earliestPutTs=1733891197805 2024-12-11T04:26:39,674 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 4515034585ea416faf60217a34da1b1e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1733891198139 2024-12-11T04:26:39,674 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting f053ff63bc734d38a23eddf1ed5933b7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1733891198139 2024-12-11T04:26:39,674 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 8f02a464fab34f33a9d295ad3168ebc1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=388, earliestPutTs=1733891198464 2024-12-11T04:26:39,674 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 370e5e22c8c449a6ac990be06388e217, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=388, earliestPutTs=1733891198464 2024-12-11T04:26:39,690 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c8c23c02526ae28f7a94d562fbd47bb4#A#compaction#84 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:26:39,690 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/8881ba1ba42048dfa841b9b5638a8ddd is 50, key is test_row_0/A:col10/1733891198465/Put/seqid=0 2024-12-11T04:26:39,694 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c8c23c02526ae28f7a94d562fbd47bb4#B#compaction#85 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:26:39,694 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/171be08ec3cd4825b6fb59d93d2737ce is 50, key is test_row_0/B:col10/1733891198465/Put/seqid=0 2024-12-11T04:26:39,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741924_1100 (size=13255) 2024-12-11T04:26:39,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on c8c23c02526ae28f7a94d562fbd47bb4 2024-12-11T04:26:39,731 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c8c23c02526ae28f7a94d562fbd47bb4 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-11T04:26:39,731 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=A 2024-12-11T04:26:39,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:39,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=B 2024-12-11T04:26:39,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:39,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=C 2024-12-11T04:26:39,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:39,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741923_1099 (size=13255) 2024-12-11T04:26:39,745 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/8881ba1ba42048dfa841b9b5638a8ddd as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/8881ba1ba42048dfa841b9b5638a8ddd 2024-12-11T04:26:39,753 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c8c23c02526ae28f7a94d562fbd47bb4/A of c8c23c02526ae28f7a94d562fbd47bb4 into 8881ba1ba42048dfa841b9b5638a8ddd(size=12.9 K), total size for store is 12.9 
K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:26:39,753 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:39,753 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4., storeName=c8c23c02526ae28f7a94d562fbd47bb4/A, priority=13, startTime=1733891199670; duration=0sec 2024-12-11T04:26:39,754 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/949d3281201b418b88da9e2e060955ac is 50, key is test_row_0/A:col10/1733891199101/Put/seqid=0 2024-12-11T04:26:39,754 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:26:39,754 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c8c23c02526ae28f7a94d562fbd47bb4:A 2024-12-11T04:26:39,754 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:26:39,757 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:26:39,757 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): c8c23c02526ae28f7a94d562fbd47bb4/C is initiating minor compaction (all files) 2024-12-11T04:26:39,757 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c8c23c02526ae28f7a94d562fbd47bb4/C in TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 
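Alongside the automatic selections made by ExploringCompactionPolicy in the entries above, a compaction of a specific store can also be requested explicitly through the Admin API. The sketch below is hypothetical and not part of the test; it reuses the same connection setup as the earlier flush sketch and targets family C, which the short-compactions thread has just selected.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class CompactRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask for a compaction of the C family of the test table; the region
      // server queues it much like the system-requested compactions above.
      admin.compact(TableName.valueOf("TestAcidGuarantees"), Bytes.toBytes("C"));
    }
  }
}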
2024-12-11T04:26:39,757 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/6b43b8dfb2574000a6028092a880cf7b, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/6a933c73acfa41faaa1d609d1d2664f5, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/44ef559838f64cdda3a09b98ea5c8728] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp, totalSize=36.9 K 2024-12-11T04:26:39,757 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6b43b8dfb2574000a6028092a880cf7b, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=347, earliestPutTs=1733891197805 2024-12-11T04:26:39,758 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6a933c73acfa41faaa1d609d1d2664f5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1733891198139 2024-12-11T04:26:39,758 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 44ef559838f64cdda3a09b98ea5c8728, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=388, earliestPutTs=1733891198464 2024-12-11T04:26:39,763 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:39,763 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:39,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891259755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:39,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891259756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:39,764 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:39,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891259760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:39,766 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:39,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891259763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:39,767 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:39,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891259764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:39,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741925_1101 (size=12301) 2024-12-11T04:26:39,791 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=409 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/949d3281201b418b88da9e2e060955ac 2024-12-11T04:26:39,792 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:39,793 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-11T04:26:39,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:39,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. as already flushing 2024-12-11T04:26:39,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:39,793 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:39,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:39,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:39,802 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c8c23c02526ae28f7a94d562fbd47bb4#C#compaction#87 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:26:39,803 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/ea100281f3434bfbb20592f00b1dc13e is 50, key is test_row_0/C:col10/1733891198465/Put/seqid=0 2024-12-11T04:26:39,803 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/5f152bc98570425d8b6ce8435dccd99c is 50, key is test_row_0/B:col10/1733891199101/Put/seqid=0 2024-12-11T04:26:39,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741927_1103 (size=12301) 2024-12-11T04:26:39,840 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=409 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/5f152bc98570425d8b6ce8435dccd99c 2024-12-11T04:26:39,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741926_1102 (size=13255) 2024-12-11T04:26:39,861 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/e72cb566d06440faab8b79819a1a31a4 is 50, key is test_row_0/C:col10/1733891199101/Put/seqid=0 2024-12-11T04:26:39,867 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:39,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891259865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:39,868 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:39,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891259865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:39,868 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:39,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891259866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:39,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:39,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891259868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:39,871 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:39,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891259869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:39,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741928_1104 (size=12301) 2024-12-11T04:26:39,916 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=409 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/e72cb566d06440faab8b79819a1a31a4 2024-12-11T04:26:39,926 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/949d3281201b418b88da9e2e060955ac as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/949d3281201b418b88da9e2e060955ac 2024-12-11T04:26:39,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-11T04:26:39,932 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/949d3281201b418b88da9e2e060955ac, entries=150, sequenceid=409, filesize=12.0 K 2024-12-11T04:26:39,933 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/5f152bc98570425d8b6ce8435dccd99c as 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/5f152bc98570425d8b6ce8435dccd99c 2024-12-11T04:26:39,940 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/5f152bc98570425d8b6ce8435dccd99c, entries=150, sequenceid=409, filesize=12.0 K 2024-12-11T04:26:39,941 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/e72cb566d06440faab8b79819a1a31a4 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/e72cb566d06440faab8b79819a1a31a4 2024-12-11T04:26:39,946 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:39,946 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-11T04:26:39,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:39,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. as already flushing 2024-12-11T04:26:39,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:39,947 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
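The repeated RegionTooBusyException warnings throughout this stretch are the region server refusing writes because the region's memstore is over its blocking limit (512.0 K in this test configuration) while flushes and compactions catch up. In practice the HBase client's built-in retry logic handles this; the hand-rolled loop below is purely to illustrate what the exception means to a caller. The table, row, family, and qualifier names are taken from the log (test_row_0, family A, col10); the value, retry count, and backoff numbers are arbitrary.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;                 // arbitrary starting backoff
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);
          break;                            // write accepted
        } catch (RegionTooBusyException e) {
          // Same condition as the "Over memstore limit=512.0 K" warnings above:
          // the region blocks writes until its memstore drains.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}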
2024-12-11T04:26:39,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:39,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:39,948 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/e72cb566d06440faab8b79819a1a31a4, entries=150, sequenceid=409, filesize=12.0 K 2024-12-11T04:26:39,950 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 KB/89310 for c8c23c02526ae28f7a94d562fbd47bb4 in 218ms, sequenceid=409, compaction requested=false 2024-12-11T04:26:39,950 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:40,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on c8c23c02526ae28f7a94d562fbd47bb4 2024-12-11T04:26:40,075 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c8c23c02526ae28f7a94d562fbd47bb4 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-11T04:26:40,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=A 2024-12-11T04:26:40,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:40,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=B 2024-12-11T04:26:40,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:40,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=C 2024-12-11T04:26:40,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:40,086 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/47f259cd0cce4ac4b66fddc98f4d804a is 50, key is test_row_0/A:col10/1733891200075/Put/seqid=0 2024-12-11T04:26:40,098 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:40,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891260091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:40,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741929_1105 (size=12301) 2024-12-11T04:26:40,099 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:40,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891260093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:40,099 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=430 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/47f259cd0cce4ac4b66fddc98f4d804a 2024-12-11T04:26:40,099 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:40,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891260094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:40,100 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:40,100 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:40,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891260095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:40,101 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-11T04:26:40,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:40,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. as already flushing 2024-12-11T04:26:40,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:40,101 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:40,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:40,102 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:40,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891260098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:40,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
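Editor's note: the repeated RegionTooBusyException entries above come from HRegion.checkResources rejecting writes while the region's memstore sits over its blocking limit (512.0 K in this test), and the flush procedure (pid=25) fails only because a flush is already in progress. The sketch below is not the test's own client code; it is a minimal illustration of writing to this table and backing off on that exception, assuming a column family "A" and default client configuration, and noting that the stock HBase client already retries RegionTooBusyException internally (it may also surface wrapped in a retries-exhausted exception depending on settings).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;
            for (int attempt = 1; ; attempt++) {
                try {
                    // The server rejects this with RegionTooBusyException while the
                    // memstore is over flush.size * block.multiplier (512 K in the log).
                    table.put(put);
                    break;
                } catch (RegionTooBusyException e) {
                    if (attempt >= 5) {
                        throw e;              // give up after a few attempts
                    }
                    Thread.sleep(backoffMs);  // wait for the in-flight flush to drain the memstore
                    backoffMs *= 2;
                }
            }
        }
    }
}
```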
2024-12-11T04:26:40,120 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/bcb3061d98d94492a7aec954b044b1a0 is 50, key is test_row_0/B:col10/1733891200075/Put/seqid=0 2024-12-11T04:26:40,130 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/171be08ec3cd4825b6fb59d93d2737ce as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/171be08ec3cd4825b6fb59d93d2737ce 2024-12-11T04:26:40,138 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c8c23c02526ae28f7a94d562fbd47bb4/B of c8c23c02526ae28f7a94d562fbd47bb4 into 171be08ec3cd4825b6fb59d93d2737ce(size=12.9 K), total size for store is 25.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:26:40,138 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:40,138 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4., storeName=c8c23c02526ae28f7a94d562fbd47bb4/B, priority=13, startTime=1733891199671; duration=0sec 2024-12-11T04:26:40,138 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:40,138 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c8c23c02526ae28f7a94d562fbd47bb4:B 2024-12-11T04:26:40,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741930_1106 (size=12301) 2024-12-11T04:26:40,151 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=430 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/bcb3061d98d94492a7aec954b044b1a0 2024-12-11T04:26:40,164 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/930fb8c231494fe886774c0419ebb3a1 is 50, key is test_row_0/C:col10/1733891200075/Put/seqid=0 2024-12-11T04:26:40,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741931_1107 (size=12301) 2024-12-11T04:26:40,183 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=430 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/930fb8c231494fe886774c0419ebb3a1 2024-12-11T04:26:40,191 
DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/47f259cd0cce4ac4b66fddc98f4d804a as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/47f259cd0cce4ac4b66fddc98f4d804a 2024-12-11T04:26:40,200 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/47f259cd0cce4ac4b66fddc98f4d804a, entries=150, sequenceid=430, filesize=12.0 K 2024-12-11T04:26:40,202 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/bcb3061d98d94492a7aec954b044b1a0 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/bcb3061d98d94492a7aec954b044b1a0 2024-12-11T04:26:40,202 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:40,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891260199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:40,203 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:40,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891260200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:40,203 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:40,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891260201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:40,204 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:40,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891260203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:40,206 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:40,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891260204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:40,221 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/bcb3061d98d94492a7aec954b044b1a0, entries=150, sequenceid=430, filesize=12.0 K 2024-12-11T04:26:40,224 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/930fb8c231494fe886774c0419ebb3a1 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/930fb8c231494fe886774c0419ebb3a1 2024-12-11T04:26:40,232 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/930fb8c231494fe886774c0419ebb3a1, entries=150, sequenceid=430, filesize=12.0 K 2024-12-11T04:26:40,233 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for c8c23c02526ae28f7a94d562fbd47bb4 in 158ms, sequenceid=430, compaction requested=true 2024-12-11T04:26:40,233 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:40,233 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c8c23c02526ae28f7a94d562fbd47bb4:A, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:26:40,233 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:40,233 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c8c23c02526ae28f7a94d562fbd47bb4:B, priority=-2147483648, current under compaction store size is 3 
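Editor's note: the flush above clears roughly 114 KB yet the "Over memstore limit=512.0 K" rejections continue while new writes refill the memstore. That 512 K blocking limit is the per-region memstore flush size multiplied by the block multiplier, so the test evidently runs with a deliberately tiny flush size. The values below are only an assumption chosen to reproduce the 512 K figure (128 K × 4), not the settings TestAcidGuarantees actually uses.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Flush a region's memstore once it reaches this many bytes (assumed 128 K here).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        // Block new writes once the memstore grows past flush.size * multiplier.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128 * 1024 * 1024L);
        int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        // 128 K * 4 = 512 K, the "Over memstore limit" value reported in the log.
        System.out.println("blocking limit = " + (flushSize * multiplier) + " bytes");
    }
}
```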
2024-12-11T04:26:40,233 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:26:40,233 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:26:40,233 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c8c23c02526ae28f7a94d562fbd47bb4:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:26:40,233 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-11T04:26:40,235 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37857 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:26:40,236 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): c8c23c02526ae28f7a94d562fbd47bb4/A is initiating minor compaction (all files) 2024-12-11T04:26:40,236 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c8c23c02526ae28f7a94d562fbd47bb4/A in TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:40,236 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/8881ba1ba42048dfa841b9b5638a8ddd, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/949d3281201b418b88da9e2e060955ac, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/47f259cd0cce4ac4b66fddc98f4d804a] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp, totalSize=37.0 K 2024-12-11T04:26:40,237 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 8881ba1ba42048dfa841b9b5638a8ddd, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=388, earliestPutTs=1733891198464 2024-12-11T04:26:40,237 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 949d3281201b418b88da9e2e060955ac, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=409, earliestPutTs=1733891199101 2024-12-11T04:26:40,238 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 47f259cd0cce4ac4b66fddc98f4d804a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=430, earliestPutTs=1733891200072 2024-12-11T04:26:40,255 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:40,256 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 
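Editor's note: the selection above ("3 store files, 0 compacting, 3 eligible, 16 blocking") is the exploring compaction policy choosing all three eligible A-store hfiles for a minor compaction; 16 matches the default blocking store-file count. The snippet below is a deliberately simplified illustration of the size-ratio test such a policy applies under the default ratio of 1.2; it is not the actual ExploringCompactionPolicy code.

```java
import java.util.List;

public class CompactionRatioSketch {
    // Simplified version of the check a size-ratio based minor-compaction policy applies:
    // every candidate file must be no larger than ratio * (sum of the other candidates).
    static boolean withinRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > ratio * (total - size)) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Roughly the three A-store files from the log: 12.9 K + 12.0 K + 12.0 K,
        // 37857 bytes total, matching "selected 3 files of size 37857".
        List<Long> sizes = List.of(13_255L, 12_301L, 12_301L);
        System.out.println(withinRatio(sizes, 1.2)); // true: all three are compacted together
    }
}
```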
2024-12-11T04:26:40,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:40,257 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2837): Flushing c8c23c02526ae28f7a94d562fbd47bb4 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-11T04:26:40,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=A 2024-12-11T04:26:40,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:40,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=B 2024-12-11T04:26:40,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:40,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=C 2024-12-11T04:26:40,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:40,267 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c8c23c02526ae28f7a94d562fbd47bb4#A#compaction#93 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:26:40,267 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/ed1bdad3af904ad7bfdbf4ff98f29ad1 is 50, key is test_row_0/A:col10/1733891200075/Put/seqid=0 2024-12-11T04:26:40,269 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/ea100281f3434bfbb20592f00b1dc13e as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/ea100281f3434bfbb20592f00b1dc13e 2024-12-11T04:26:40,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/611537c2b66944a3a46a087a5189caf3 is 50, key is test_row_0/A:col10/1733891200091/Put/seqid=0 2024-12-11T04:26:40,286 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c8c23c02526ae28f7a94d562fbd47bb4/C of c8c23c02526ae28f7a94d562fbd47bb4 into ea100281f3434bfbb20592f00b1dc13e(size=12.9 K), total size for store is 37.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:26:40,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741932_1108 (size=13357) 2024-12-11T04:26:40,286 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:40,286 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4., storeName=c8c23c02526ae28f7a94d562fbd47bb4/C, priority=13, startTime=1733891199671; duration=0sec 2024-12-11T04:26:40,286 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-11T04:26:40,286 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c8c23c02526ae28f7a94d562fbd47bb4:C 2024-12-11T04:26:40,286 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c8c23c02526ae28f7a94d562fbd47bb4:C 2024-12-11T04:26:40,286 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:26:40,287 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37857 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:26:40,287 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] 
regionserver.HStore(1540): c8c23c02526ae28f7a94d562fbd47bb4/B is initiating minor compaction (all files) 2024-12-11T04:26:40,288 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c8c23c02526ae28f7a94d562fbd47bb4/B in TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:40,288 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/171be08ec3cd4825b6fb59d93d2737ce, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/5f152bc98570425d8b6ce8435dccd99c, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/bcb3061d98d94492a7aec954b044b1a0] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp, totalSize=37.0 K 2024-12-11T04:26:40,288 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 171be08ec3cd4825b6fb59d93d2737ce, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=388, earliestPutTs=1733891198464 2024-12-11T04:26:40,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741933_1109 (size=12301) 2024-12-11T04:26:40,289 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5f152bc98570425d8b6ce8435dccd99c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=409, earliestPutTs=1733891199101 2024-12-11T04:26:40,290 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=447 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/611537c2b66944a3a46a087a5189caf3 2024-12-11T04:26:40,290 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting bcb3061d98d94492a7aec954b044b1a0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=430, earliestPutTs=1733891200072 2024-12-11T04:26:40,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/11f4569bbc694ee09d9aa9279759f41a is 50, key is test_row_0/B:col10/1733891200091/Put/seqid=0 2024-12-11T04:26:40,306 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c8c23c02526ae28f7a94d562fbd47bb4#B#compaction#96 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:26:40,307 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/46b15beb62a04dbfbf542714d7731bf3 is 50, key is test_row_0/B:col10/1733891200075/Put/seqid=0 2024-12-11T04:26:40,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741934_1110 (size=12301) 2024-12-11T04:26:40,331 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=447 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/11f4569bbc694ee09d9aa9279759f41a 2024-12-11T04:26:40,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741935_1111 (size=13357) 2024-12-11T04:26:40,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/6359afd497e84fb2a12817d39fd1aac6 is 50, key is test_row_0/C:col10/1733891200091/Put/seqid=0 2024-12-11T04:26:40,348 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/46b15beb62a04dbfbf542714d7731bf3 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/46b15beb62a04dbfbf542714d7731bf3 2024-12-11T04:26:40,355 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c8c23c02526ae28f7a94d562fbd47bb4/B of c8c23c02526ae28f7a94d562fbd47bb4 into 46b15beb62a04dbfbf542714d7731bf3(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
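Editor's note: the "average throughput is 3.28 MB/second ... total limit is 50.00 MB/second" lines come from the pressure-aware throughput controller that paces compaction writes; with this little data it never needs to sleep. The property names in the sketch below are my recollection of the knobs that bound that limit and should be verified against the HBase release in use; the values are illustrative only.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed property names (double-check for your version): the controller scales the
        // allowed compaction throughput between these bounds based on flush/memstore pressure.
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);   // 50 MB/s
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024); // 100 MB/s
        System.out.println("compaction throughput bounded between 50 MB/s and 100 MB/s");
    }
}
```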
2024-12-11T04:26:40,355 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:40,355 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4., storeName=c8c23c02526ae28f7a94d562fbd47bb4/B, priority=13, startTime=1733891200233; duration=0sec 2024-12-11T04:26:40,356 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:26:40,356 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c8c23c02526ae28f7a94d562fbd47bb4:B 2024-12-11T04:26:40,356 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:26:40,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741936_1112 (size=12301) 2024-12-11T04:26:40,358 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37857 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:26:40,359 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): c8c23c02526ae28f7a94d562fbd47bb4/C is initiating minor compaction (all files) 2024-12-11T04:26:40,359 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c8c23c02526ae28f7a94d562fbd47bb4/C in TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 
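Editor's note: the RS_FLUSH_OPERATIONS entries interleaved above are the region-server side of a master FlushRegionProcedure (pid=25), which the test drives through the Admin API (the "Client=jenkins ... flush TestAcidGuarantees" and "Operation: FLUSH ... procId: 24 completed" entries further down, followed by pid=26/27). A minimal sketch of issuing such a flush from a client, assuming default configuration:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AdminFlushExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Asks the master to run a FlushTableProcedure, which fans out
            // FlushRegionProcedure calls to the region servers (pid=24..27 in this log).
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```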
2024-12-11T04:26:40,359 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/ea100281f3434bfbb20592f00b1dc13e, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/e72cb566d06440faab8b79819a1a31a4, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/930fb8c231494fe886774c0419ebb3a1] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp, totalSize=37.0 K 2024-12-11T04:26:40,359 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting ea100281f3434bfbb20592f00b1dc13e, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=388, earliestPutTs=1733891198464 2024-12-11T04:26:40,360 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting e72cb566d06440faab8b79819a1a31a4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=409, earliestPutTs=1733891199101 2024-12-11T04:26:40,360 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 930fb8c231494fe886774c0419ebb3a1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=430, earliestPutTs=1733891200072 2024-12-11T04:26:40,364 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=447 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/6359afd497e84fb2a12817d39fd1aac6 2024-12-11T04:26:40,371 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c8c23c02526ae28f7a94d562fbd47bb4#C#compaction#98 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:26:40,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/611537c2b66944a3a46a087a5189caf3 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/611537c2b66944a3a46a087a5189caf3 2024-12-11T04:26:40,372 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/cb0b40e5fc0842d5acf4f5e42912330c is 50, key is test_row_0/C:col10/1733891200075/Put/seqid=0 2024-12-11T04:26:40,379 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/611537c2b66944a3a46a087a5189caf3, entries=150, sequenceid=447, filesize=12.0 K 2024-12-11T04:26:40,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/11f4569bbc694ee09d9aa9279759f41a as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/11f4569bbc694ee09d9aa9279759f41a 2024-12-11T04:26:40,386 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/11f4569bbc694ee09d9aa9279759f41a, entries=150, sequenceid=447, filesize=12.0 K 2024-12-11T04:26:40,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/6359afd497e84fb2a12817d39fd1aac6 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/6359afd497e84fb2a12817d39fd1aac6 2024-12-11T04:26:40,395 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/6359afd497e84fb2a12817d39fd1aac6, entries=150, sequenceid=447, filesize=12.0 K 2024-12-11T04:26:40,396 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=0 B/0 for c8c23c02526ae28f7a94d562fbd47bb4 in 140ms, 
sequenceid=447, compaction requested=false 2024-12-11T04:26:40,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2538): Flush status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:40,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:40,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=25 2024-12-11T04:26:40,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=25 2024-12-11T04:26:40,400 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-12-11T04:26:40,401 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0700 sec 2024-12-11T04:26:40,403 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees in 1.0750 sec 2024-12-11T04:26:40,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741937_1113 (size=13357) 2024-12-11T04:26:40,418 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c8c23c02526ae28f7a94d562fbd47bb4 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-11T04:26:40,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=A 2024-12-11T04:26:40,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:40,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=B 2024-12-11T04:26:40,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:40,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=C 2024-12-11T04:26:40,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:40,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on c8c23c02526ae28f7a94d562fbd47bb4 2024-12-11T04:26:40,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-11T04:26:40,432 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24 completed 2024-12-11T04:26:40,433 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:26:40,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees 2024-12-11T04:26:40,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-11T04:26:40,435 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:26:40,436 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:26:40,436 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:26:40,442 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/4ea1755ea8cd48ce9944780bfe4e13f7 is 50, key is test_row_0/A:col10/1733891200412/Put/seqid=0 2024-12-11T04:26:40,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741938_1114 (size=19621) 2024-12-11T04:26:40,498 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:40,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891260490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:40,500 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:40,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891260494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:40,501 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:40,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891260497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:40,502 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:40,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891260498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:40,507 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:40,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891260505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:40,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-11T04:26:40,588 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:40,589 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-11T04:26:40,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:40,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. as already flushing 2024-12-11T04:26:40,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:40,589 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:40,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:40,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:40,601 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:40,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891260600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:40,605 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:40,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891260603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:40,605 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:40,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891260603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:40,606 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:40,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891260604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:40,608 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:40,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891260608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:40,692 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/ed1bdad3af904ad7bfdbf4ff98f29ad1 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/ed1bdad3af904ad7bfdbf4ff98f29ad1 2024-12-11T04:26:40,698 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c8c23c02526ae28f7a94d562fbd47bb4/A of c8c23c02526ae28f7a94d562fbd47bb4 into ed1bdad3af904ad7bfdbf4ff98f29ad1(size=13.0 K), total size for store is 25.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T04:26:40,698 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:40,698 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4., storeName=c8c23c02526ae28f7a94d562fbd47bb4/A, priority=13, startTime=1733891200233; duration=0sec 2024-12-11T04:26:40,698 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:40,698 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c8c23c02526ae28f7a94d562fbd47bb4:A 2024-12-11T04:26:40,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-11T04:26:40,742 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:40,743 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-11T04:26:40,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:40,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. as already flushing 2024-12-11T04:26:40,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:40,744 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:26:40,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:40,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:40,803 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:40,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891260803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:40,809 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:40,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891260807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:40,812 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:40,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891260808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:40,812 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/cb0b40e5fc0842d5acf4f5e42912330c as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/cb0b40e5fc0842d5acf4f5e42912330c 2024-12-11T04:26:40,813 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:40,812 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:40,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891260810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:40,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891260808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:40,820 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c8c23c02526ae28f7a94d562fbd47bb4/C of c8c23c02526ae28f7a94d562fbd47bb4 into cb0b40e5fc0842d5acf4f5e42912330c(size=13.0 K), total size for store is 25.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:26:40,820 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:40,820 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4., storeName=c8c23c02526ae28f7a94d562fbd47bb4/C, priority=13, startTime=1733891200233; duration=0sec 2024-12-11T04:26:40,820 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:40,820 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c8c23c02526ae28f7a94d562fbd47bb4:C 2024-12-11T04:26:40,855 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=460 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/4ea1755ea8cd48ce9944780bfe4e13f7 2024-12-11T04:26:40,872 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/b52140832a89462f8720c41949635206 is 50, key is test_row_0/B:col10/1733891200412/Put/seqid=0 2024-12-11T04:26:40,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741939_1115 (size=12301) 2024-12-11T04:26:40,903 DEBUG 
[RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:40,903 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-11T04:26:40,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:40,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. as already flushing 2024-12-11T04:26:40,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:40,904 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:40,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:26:40,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:41,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-11T04:26:41,056 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:41,057 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-11T04:26:41,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 
2024-12-11T04:26:41,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. as already flushing 2024-12-11T04:26:41,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:41,057 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:41,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:26:41,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:41,107 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:41,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891261107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:41,112 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:41,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891261111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:41,119 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:41,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891261114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:41,119 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:41,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891261116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:41,123 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:41,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891261123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:41,210 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:41,211 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-11T04:26:41,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:41,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. as already flushing 2024-12-11T04:26:41,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:41,211 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:41,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:41,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:41,293 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=460 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/b52140832a89462f8720c41949635206 2024-12-11T04:26:41,310 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/14fb264f38234e8eaab67cf3e50d4bdb is 50, key is test_row_0/C:col10/1733891200412/Put/seqid=0 2024-12-11T04:26:41,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741940_1116 (size=12301) 2024-12-11T04:26:41,347 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=460 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/14fb264f38234e8eaab67cf3e50d4bdb 2024-12-11T04:26:41,357 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/4ea1755ea8cd48ce9944780bfe4e13f7 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/4ea1755ea8cd48ce9944780bfe4e13f7 2024-12-11T04:26:41,364 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:41,365 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/4ea1755ea8cd48ce9944780bfe4e13f7, entries=300, sequenceid=460, filesize=19.2 K 2024-12-11T04:26:41,366 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-11T04:26:41,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:41,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. as already flushing 2024-12-11T04:26:41,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:41,366 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:41,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:41,366 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/b52140832a89462f8720c41949635206 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/b52140832a89462f8720c41949635206 2024-12-11T04:26:41,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
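Editor's note (not part of the captured log): the RegionTooBusyException entries earlier in this stretch are write backpressure. HRegion.checkResources rejects new mutations once a region's memstore passes its blocking limit, which is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the 512.0 K figure here implies the test shrank those settings well below their defaults, but the exact values are not visible in this excerpt. A minimal sketch of the two knobs follows, with assumed values chosen only to reproduce a 512 K limit.

```java
// Illustrative sketch, not the test's actual configuration: the settings behind the
// "Over memstore limit=512.0 K" rejections logged above. The values are assumptions
// picked so that flush size * block multiplier equals 512 K.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024); // 128 K per-region flush trigger (assumed)
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // default multiplier

        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);

        // Past this limit, HRegion.checkResources throws RegionTooBusyException for new
        // mutations until a flush shrinks the memstore again.
        System.out.println("Blocking memstore limit: " + (blockingLimit / 1024.0) + " K");
    }
}
```

The exception is retryable, so clients normally resubmit the mutation; that is consistent with the same client connections reappearing in the entries above with new callIds and later deadlines.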
2024-12-11T04:26:41,371 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/b52140832a89462f8720c41949635206, entries=150, sequenceid=460, filesize=12.0 K 2024-12-11T04:26:41,373 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/14fb264f38234e8eaab67cf3e50d4bdb as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/14fb264f38234e8eaab67cf3e50d4bdb 2024-12-11T04:26:41,379 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/14fb264f38234e8eaab67cf3e50d4bdb, entries=150, sequenceid=460, filesize=12.0 K 2024-12-11T04:26:41,380 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for c8c23c02526ae28f7a94d562fbd47bb4 in 963ms, sequenceid=460, compaction requested=true 2024-12-11T04:26:41,380 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:41,380 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:26:41,381 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c8c23c02526ae28f7a94d562fbd47bb4:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:26:41,381 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:41,381 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:26:41,381 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c8c23c02526ae28f7a94d562fbd47bb4:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:26:41,382 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 45279 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:26:41,382 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): c8c23c02526ae28f7a94d562fbd47bb4/A is initiating minor compaction (all files) 2024-12-11T04:26:41,382 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c8c23c02526ae28f7a94d562fbd47bb4/A in TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 
2024-12-11T04:26:41,382 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/ed1bdad3af904ad7bfdbf4ff98f29ad1, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/611537c2b66944a3a46a087a5189caf3, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/4ea1755ea8cd48ce9944780bfe4e13f7] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp, totalSize=44.2 K 2024-12-11T04:26:41,383 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:41,383 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c8c23c02526ae28f7a94d562fbd47bb4:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:26:41,383 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:26:41,383 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting ed1bdad3af904ad7bfdbf4ff98f29ad1, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=430, earliestPutTs=1733891200072 2024-12-11T04:26:41,384 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37959 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:26:41,384 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): c8c23c02526ae28f7a94d562fbd47bb4/B is initiating minor compaction (all files) 2024-12-11T04:26:41,384 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c8c23c02526ae28f7a94d562fbd47bb4/B in TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 
2024-12-11T04:26:41,384 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/46b15beb62a04dbfbf542714d7731bf3, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/11f4569bbc694ee09d9aa9279759f41a, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/b52140832a89462f8720c41949635206] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp, totalSize=37.1 K 2024-12-11T04:26:41,385 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 611537c2b66944a3a46a087a5189caf3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=447, earliestPutTs=1733891200091 2024-12-11T04:26:41,385 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 46b15beb62a04dbfbf542714d7731bf3, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=430, earliestPutTs=1733891200072 2024-12-11T04:26:41,385 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4ea1755ea8cd48ce9944780bfe4e13f7, keycount=300, bloomtype=ROW, size=19.2 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1733891200411 2024-12-11T04:26:41,386 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 11f4569bbc694ee09d9aa9279759f41a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=447, earliestPutTs=1733891200091 2024-12-11T04:26:41,387 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting b52140832a89462f8720c41949635206, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1733891200411 2024-12-11T04:26:41,400 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c8c23c02526ae28f7a94d562fbd47bb4#B#compaction#102 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:26:41,401 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/98e39b59b6c643d288957f94c2a5c356 is 50, key is test_row_0/B:col10/1733891200412/Put/seqid=0 2024-12-11T04:26:41,401 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c8c23c02526ae28f7a94d562fbd47bb4#A#compaction#103 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:26:41,402 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/7062aa885bec45ba8aa4ca029cd52b8c is 50, key is test_row_0/A:col10/1733891200412/Put/seqid=0 2024-12-11T04:26:41,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741941_1117 (size=13459) 2024-12-11T04:26:41,418 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/98e39b59b6c643d288957f94c2a5c356 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/98e39b59b6c643d288957f94c2a5c356 2024-12-11T04:26:41,426 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c8c23c02526ae28f7a94d562fbd47bb4/B of c8c23c02526ae28f7a94d562fbd47bb4 into 98e39b59b6c643d288957f94c2a5c356(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:26:41,426 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:41,426 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4., storeName=c8c23c02526ae28f7a94d562fbd47bb4/B, priority=13, startTime=1733891201381; duration=0sec 2024-12-11T04:26:41,426 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:26:41,426 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c8c23c02526ae28f7a94d562fbd47bb4:B 2024-12-11T04:26:41,427 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:26:41,429 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37959 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:26:41,430 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): c8c23c02526ae28f7a94d562fbd47bb4/C is initiating minor compaction (all files) 2024-12-11T04:26:41,430 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c8c23c02526ae28f7a94d562fbd47bb4/C in TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 
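Editor's note (not part of the captured log): the ExploringCompactionPolicy lines above report each selection in raw bytes (45279 for store A, 37959 for B and C), while the matching HStore lines report the same selections as totalSize in kibibytes. The tiny sketch below just performs that conversion using the byte totals copied from the log; the class itself is illustrative, not HBase code.

```java
// Illustrative arithmetic only: convert the compaction-selection byte totals logged by
// ExploringCompactionPolicy into the kibibyte "totalSize" figures logged by HStore.
public class CompactionSelectionSizes {
    private static String toKiB(long bytes) {
        return String.format("%.1f K", bytes / 1024.0);
    }

    public static void main(String[] args) {
        long storeA = 45_279L;     // 3 files selected for c8c23c02526ae28f7a94d562fbd47bb4/A
        long storeBandC = 37_959L; // 3 files selected for each of /B and /C

        System.out.println("A:   " + toKiB(storeA));     // 44.2 K, matching totalSize=44.2 K
        System.out.println("B/C: " + toKiB(storeBandC)); // 37.1 K, matching totalSize=37.1 K
    }
}
```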
2024-12-11T04:26:41,430 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/cb0b40e5fc0842d5acf4f5e42912330c, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/6359afd497e84fb2a12817d39fd1aac6, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/14fb264f38234e8eaab67cf3e50d4bdb] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp, totalSize=37.1 K 2024-12-11T04:26:41,430 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting cb0b40e5fc0842d5acf4f5e42912330c, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=430, earliestPutTs=1733891200072 2024-12-11T04:26:41,431 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 6359afd497e84fb2a12817d39fd1aac6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=447, earliestPutTs=1733891200091 2024-12-11T04:26:41,432 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 14fb264f38234e8eaab67cf3e50d4bdb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1733891200411 2024-12-11T04:26:41,445 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c8c23c02526ae28f7a94d562fbd47bb4#C#compaction#104 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:26:41,447 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/a7cb90db28f84d6b9c6f480327adc6dd is 50, key is test_row_0/C:col10/1733891200412/Put/seqid=0 2024-12-11T04:26:41,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741942_1118 (size=13459) 2024-12-11T04:26:41,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741943_1119 (size=13459) 2024-12-11T04:26:41,471 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/a7cb90db28f84d6b9c6f480327adc6dd as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/a7cb90db28f84d6b9c6f480327adc6dd 2024-12-11T04:26:41,480 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c8c23c02526ae28f7a94d562fbd47bb4/C of c8c23c02526ae28f7a94d562fbd47bb4 into a7cb90db28f84d6b9c6f480327adc6dd(size=13.1 K), total size for store is 13.1 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:26:41,480 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:41,481 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4., storeName=c8c23c02526ae28f7a94d562fbd47bb4/C, priority=13, startTime=1733891201383; duration=0sec 2024-12-11T04:26:41,481 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:41,481 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c8c23c02526ae28f7a94d562fbd47bb4:C 2024-12-11T04:26:41,519 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:41,521 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-11T04:26:41,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:41,521 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2837): Flushing c8c23c02526ae28f7a94d562fbd47bb4 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-11T04:26:41,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=A 2024-12-11T04:26:41,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:41,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=B 2024-12-11T04:26:41,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:41,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=C 2024-12-11T04:26:41,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:41,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/e0940d4f79b54830986c57e7c4e0f601 is 50, key is test_row_0/A:col10/1733891200492/Put/seqid=0 
2024-12-11T04:26:41,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741944_1120 (size=12301) 2024-12-11T04:26:41,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-11T04:26:41,541 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=490 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/e0940d4f79b54830986c57e7c4e0f601 2024-12-11T04:26:41,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/5eed8d8f7b654d60b49a474b773b51d7 is 50, key is test_row_0/B:col10/1733891200492/Put/seqid=0 2024-12-11T04:26:41,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741945_1121 (size=12301) 2024-12-11T04:26:41,601 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=490 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/5eed8d8f7b654d60b49a474b773b51d7 2024-12-11T04:26:41,615 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. as already flushing 2024-12-11T04:26:41,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on c8c23c02526ae28f7a94d562fbd47bb4 2024-12-11T04:26:41,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/82fcf54a2dff4e50acdd2e8923c98733 is 50, key is test_row_0/C:col10/1733891200492/Put/seqid=0 2024-12-11T04:26:41,629 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:41,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891261627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:41,630 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:41,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891261628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:41,631 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:41,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891261629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:41,631 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:41,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891261629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:41,631 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:41,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891261630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:41,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741946_1122 (size=12301) 2024-12-11T04:26:41,639 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=490 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/82fcf54a2dff4e50acdd2e8923c98733 2024-12-11T04:26:41,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/e0940d4f79b54830986c57e7c4e0f601 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/e0940d4f79b54830986c57e7c4e0f601 2024-12-11T04:26:41,653 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/e0940d4f79b54830986c57e7c4e0f601, entries=150, sequenceid=490, filesize=12.0 K 2024-12-11T04:26:41,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/5eed8d8f7b654d60b49a474b773b51d7 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/5eed8d8f7b654d60b49a474b773b51d7 2024-12-11T04:26:41,662 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/5eed8d8f7b654d60b49a474b773b51d7, entries=150, sequenceid=490, filesize=12.0 K 2024-12-11T04:26:41,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/82fcf54a2dff4e50acdd2e8923c98733 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/82fcf54a2dff4e50acdd2e8923c98733 2024-12-11T04:26:41,671 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/82fcf54a2dff4e50acdd2e8923c98733, entries=150, sequenceid=490, filesize=12.0 K 2024-12-11T04:26:41,672 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for c8c23c02526ae28f7a94d562fbd47bb4 in 151ms, sequenceid=490, compaction requested=false 2024-12-11T04:26:41,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2538): Flush status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:41,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 
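Editor's note (not part of the captured log): the flush that finally succeeds above is pid=27, a FlushRegionProcedure spawned by FlushTableProcedure pid=26, reported as finished just below. A table flush like this can be requested through the client Admin API; the sketch below is a hedged illustration of that call, not the code TestAcidGuarantees itself runs, and the connection configuration is assumed.

```java
// Illustrative sketch: asking the master to flush a table, which it drives as a flush
// procedure whose per-region work is executed by FlushRegionCallable on the region
// servers, as seen in the log. Not the test's own code; cluster settings are assumed.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // Flush every region of the table.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```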
2024-12-11T04:26:41,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=27 2024-12-11T04:26:41,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=27 2024-12-11T04:26:41,675 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-12-11T04:26:41,675 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2380 sec 2024-12-11T04:26:41,678 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees in 1.2430 sec 2024-12-11T04:26:41,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on c8c23c02526ae28f7a94d562fbd47bb4 2024-12-11T04:26:41,734 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c8c23c02526ae28f7a94d562fbd47bb4 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-11T04:26:41,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=A 2024-12-11T04:26:41,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:41,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=B 2024-12-11T04:26:41,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:41,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=C 2024-12-11T04:26:41,736 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:41,743 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/a79b762507144a72b10569ee21a33367 is 50, key is test_row_0/A:col10/1733891201734/Put/seqid=0 2024-12-11T04:26:41,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741947_1123 (size=12301) 2024-12-11T04:26:41,764 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=502 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/a79b762507144a72b10569ee21a33367 2024-12-11T04:26:41,775 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/65f1620645df4b9eab61d7a67127e464 is 50, key is test_row_0/B:col10/1733891201734/Put/seqid=0 2024-12-11T04:26:41,777 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size 
limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:41,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891261776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:41,779 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:41,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891261776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:41,780 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:41,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891261777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:41,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741948_1124 (size=12301) 2024-12-11T04:26:41,869 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/7062aa885bec45ba8aa4ca029cd52b8c as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/7062aa885bec45ba8aa4ca029cd52b8c 2024-12-11T04:26:41,875 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c8c23c02526ae28f7a94d562fbd47bb4/A of c8c23c02526ae28f7a94d562fbd47bb4 into 7062aa885bec45ba8aa4ca029cd52b8c(size=13.1 K), total size for store is 25.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:26:41,875 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:41,876 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4., storeName=c8c23c02526ae28f7a94d562fbd47bb4/A, priority=13, startTime=1733891201380; duration=0sec 2024-12-11T04:26:41,876 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:41,876 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c8c23c02526ae28f7a94d562fbd47bb4:A 2024-12-11T04:26:41,879 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:41,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891261878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:41,882 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:41,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891261881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:41,883 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:41,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891261881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:42,084 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:42,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891262081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:42,087 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:42,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891262086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:42,088 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:42,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891262085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:42,184 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=502 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/65f1620645df4b9eab61d7a67127e464 2024-12-11T04:26:42,196 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/cfad4bcb3aaa40a7ae35d8d7af82cc40 is 50, key is test_row_0/C:col10/1733891201734/Put/seqid=0 2024-12-11T04:26:42,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741949_1125 (size=12301) 2024-12-11T04:26:42,389 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:42,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891262387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:42,390 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:42,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891262389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:42,390 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:42,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891262389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:42,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-11T04:26:42,542 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 26 completed 2024-12-11T04:26:42,544 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:26:42,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees 2024-12-11T04:26:42,546 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:26:42,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-11T04:26:42,555 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:26:42,555 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:26:42,611 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=502 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/cfad4bcb3aaa40a7ae35d8d7af82cc40 2024-12-11T04:26:42,618 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/a79b762507144a72b10569ee21a33367 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/a79b762507144a72b10569ee21a33367 2024-12-11T04:26:42,623 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/a79b762507144a72b10569ee21a33367, entries=150, sequenceid=502, filesize=12.0 K 2024-12-11T04:26:42,625 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/65f1620645df4b9eab61d7a67127e464 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/65f1620645df4b9eab61d7a67127e464 2024-12-11T04:26:42,629 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/65f1620645df4b9eab61d7a67127e464, entries=150, sequenceid=502, filesize=12.0 K 2024-12-11T04:26:42,630 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/cfad4bcb3aaa40a7ae35d8d7af82cc40 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/cfad4bcb3aaa40a7ae35d8d7af82cc40 2024-12-11T04:26:42,632 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:42,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891262632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:42,634 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:42,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891262633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:42,636 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/cfad4bcb3aaa40a7ae35d8d7af82cc40, entries=150, sequenceid=502, filesize=12.0 K 2024-12-11T04:26:42,637 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for c8c23c02526ae28f7a94d562fbd47bb4 in 903ms, sequenceid=502, compaction requested=true 2024-12-11T04:26:42,637 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:42,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c8c23c02526ae28f7a94d562fbd47bb4:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:26:42,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:42,638 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:26:42,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c8c23c02526ae28f7a94d562fbd47bb4:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:26:42,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:42,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c8c23c02526ae28f7a94d562fbd47bb4:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:26:42,638 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:26:42,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:26:42,639 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 38061 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:26:42,639 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38061 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:26:42,639 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): c8c23c02526ae28f7a94d562fbd47bb4/B is initiating minor compaction (all files) 2024-12-11T04:26:42,639 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): c8c23c02526ae28f7a94d562fbd47bb4/A is initiating minor compaction (all files) 2024-12-11T04:26:42,639 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c8c23c02526ae28f7a94d562fbd47bb4/B in TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:42,639 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c8c23c02526ae28f7a94d562fbd47bb4/A in TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:42,639 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/98e39b59b6c643d288957f94c2a5c356, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/5eed8d8f7b654d60b49a474b773b51d7, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/65f1620645df4b9eab61d7a67127e464] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp, totalSize=37.2 K 2024-12-11T04:26:42,639 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/7062aa885bec45ba8aa4ca029cd52b8c, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/e0940d4f79b54830986c57e7c4e0f601, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/a79b762507144a72b10569ee21a33367] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp, totalSize=37.2 K 2024-12-11T04:26:42,639 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 98e39b59b6c643d288957f94c2a5c356, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1733891200411 2024-12-11T04:26:42,639 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7062aa885bec45ba8aa4ca029cd52b8c, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1733891200411 2024-12-11T04:26:42,640 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): 
Compacting e0940d4f79b54830986c57e7c4e0f601, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=490, earliestPutTs=1733891200492 2024-12-11T04:26:42,640 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 5eed8d8f7b654d60b49a474b773b51d7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=490, earliestPutTs=1733891200492 2024-12-11T04:26:42,640 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 65f1620645df4b9eab61d7a67127e464, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=502, earliestPutTs=1733891201627 2024-12-11T04:26:42,640 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting a79b762507144a72b10569ee21a33367, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=502, earliestPutTs=1733891201627 2024-12-11T04:26:42,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-11T04:26:42,652 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c8c23c02526ae28f7a94d562fbd47bb4#B#compaction#111 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:26:42,652 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/534dabca661a4477a8ca03c8ed11b6c8 is 50, key is test_row_0/B:col10/1733891201734/Put/seqid=0 2024-12-11T04:26:42,658 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c8c23c02526ae28f7a94d562fbd47bb4#A#compaction#112 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:26:42,658 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/c6a68583acaf4af5941589ddcdc5f2d1 is 50, key is test_row_0/A:col10/1733891201734/Put/seqid=0 2024-12-11T04:26:42,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741950_1126 (size=13561) 2024-12-11T04:26:42,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741951_1127 (size=13561) 2024-12-11T04:26:42,675 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/c6a68583acaf4af5941589ddcdc5f2d1 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/c6a68583acaf4af5941589ddcdc5f2d1 2024-12-11T04:26:42,681 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c8c23c02526ae28f7a94d562fbd47bb4/A of c8c23c02526ae28f7a94d562fbd47bb4 into c6a68583acaf4af5941589ddcdc5f2d1(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:26:42,681 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:42,681 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4., storeName=c8c23c02526ae28f7a94d562fbd47bb4/A, priority=13, startTime=1733891202637; duration=0sec 2024-12-11T04:26:42,682 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:26:42,682 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c8c23c02526ae28f7a94d562fbd47bb4:A 2024-12-11T04:26:42,682 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:26:42,683 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38061 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:26:42,683 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): c8c23c02526ae28f7a94d562fbd47bb4/C is initiating minor compaction (all files) 2024-12-11T04:26:42,683 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c8c23c02526ae28f7a94d562fbd47bb4/C in TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 
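The minor compactions above are queued by the region server itself once a store has three eligible flush files ("Selecting compaction from 3 store files, 0 compacting, 3 eligible"). Purely as an assumed illustration, not code from this run, a compaction of the same table could also be requested and monitored through the public Admin API roughly like this:

    // Assumed, illustrative code (not part of the test run): ask the cluster to
    // compact TestAcidGuarantees and poll until the compaction state returns to NONE.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestCompaction {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.compact(table); // queue a minor compaction for every store of every region
          while (admin.getCompactionState(table) != CompactionState.NONE) {
            Thread.sleep(100);  // the "Completed compaction ..." log lines mark the end
          }
        }
      }
    }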
2024-12-11T04:26:42,683 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/a7cb90db28f84d6b9c6f480327adc6dd, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/82fcf54a2dff4e50acdd2e8923c98733, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/cfad4bcb3aaa40a7ae35d8d7af82cc40] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp, totalSize=37.2 K 2024-12-11T04:26:42,684 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting a7cb90db28f84d6b9c6f480327adc6dd, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1733891200411 2024-12-11T04:26:42,684 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 82fcf54a2dff4e50acdd2e8923c98733, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=490, earliestPutTs=1733891200492 2024-12-11T04:26:42,685 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting cfad4bcb3aaa40a7ae35d8d7af82cc40, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=502, earliestPutTs=1733891201627 2024-12-11T04:26:42,695 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c8c23c02526ae28f7a94d562fbd47bb4#C#compaction#113 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:26:42,696 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/616d903a6ff14478a079b9a783d5b0d3 is 50, key is test_row_0/C:col10/1733891201734/Put/seqid=0 2024-12-11T04:26:42,707 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:42,707 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-11T04:26:42,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 
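The repeated "Over memstore limit=512.0 K" warnings in this log are the region blocking writes because its memstore has grown past the blocking threshold, which is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. A 512 KB limit suggests the test runs with a deliberately small flush size; the following configuration sketch (values assumed, not taken from the test) would reproduce that limit:

    // Assumed configuration sketch: with a 128 KB flush size and the default
    // block multiplier of 4, puts are rejected with RegionTooBusyException once
    // a region's memstore exceeds 4 * 128 KB = 512 KB, matching the log above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreLimits {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("blocking limit (bytes): " + blockingLimit); // 524288
      }
    }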
2024-12-11T04:26:42,708 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2837): Flushing c8c23c02526ae28f7a94d562fbd47bb4 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-11T04:26:42,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=A 2024-12-11T04:26:42,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:42,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=B 2024-12-11T04:26:42,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:42,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=C 2024-12-11T04:26:42,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:42,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/b82e713f1c4842d5886f4459cf8542a0 is 50, key is test_row_0/A:col10/1733891201775/Put/seqid=0 2024-12-11T04:26:42,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741952_1128 (size=13561) 2024-12-11T04:26:42,747 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/616d903a6ff14478a079b9a783d5b0d3 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/616d903a6ff14478a079b9a783d5b0d3 2024-12-11T04:26:42,758 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c8c23c02526ae28f7a94d562fbd47bb4/C of c8c23c02526ae28f7a94d562fbd47bb4 into 616d903a6ff14478a079b9a783d5b0d3(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
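The pid=28/29 activity above is the server side of an admin-requested table flush; earlier in this log the client issues it ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees") and HBaseAdmin's TableFuture reports "procId: 26 completed" once the procedure finishes. A minimal sketch of that client call (connection details assumed) might look like:

    // Assumed, illustrative client code for the flush requests seen in this log.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Submits a FlushTableProcedure on the master and waits for it (and its
          // per-region FlushRegionProcedure children) to finish, as logged above.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }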
2024-12-11T04:26:42,758 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:42,758 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4., storeName=c8c23c02526ae28f7a94d562fbd47bb4/C, priority=13, startTime=1733891202638; duration=0sec 2024-12-11T04:26:42,758 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:42,758 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c8c23c02526ae28f7a94d562fbd47bb4:C 2024-12-11T04:26:42,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741953_1129 (size=12301) 2024-12-11T04:26:42,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-11T04:26:42,892 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. as already flushing 2024-12-11T04:26:42,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on c8c23c02526ae28f7a94d562fbd47bb4 2024-12-11T04:26:42,903 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:42,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891262901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:42,904 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:42,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891262902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:42,904 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:42,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891262902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:43,006 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:43,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891263005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:43,006 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:43,006 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:43,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891263005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:43,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891263005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:43,072 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/534dabca661a4477a8ca03c8ed11b6c8 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/534dabca661a4477a8ca03c8ed11b6c8 2024-12-11T04:26:43,078 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c8c23c02526ae28f7a94d562fbd47bb4/B of c8c23c02526ae28f7a94d562fbd47bb4 into 534dabca661a4477a8ca03c8ed11b6c8(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T04:26:43,078 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:43,078 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4., storeName=c8c23c02526ae28f7a94d562fbd47bb4/B, priority=13, startTime=1733891202638; duration=0sec 2024-12-11T04:26:43,078 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:43,078 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c8c23c02526ae28f7a94d562fbd47bb4:B 2024-12-11T04:26:43,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-11T04:26:43,163 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=529 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/b82e713f1c4842d5886f4459cf8542a0 2024-12-11T04:26:43,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/07074c694ed04a83bd2540da86a5ce5f is 50, key is test_row_0/B:col10/1733891201775/Put/seqid=0 2024-12-11T04:26:43,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741954_1130 (size=12301) 2024-12-11T04:26:43,208 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:43,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891263207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:43,209 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:43,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891263208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:43,209 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:43,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891263208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:43,511 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:43,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891263510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:43,512 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:43,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891263510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:43,512 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:43,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891263511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:43,579 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=529 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/07074c694ed04a83bd2540da86a5ce5f 2024-12-11T04:26:43,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/c4af1f1bb2aa4983ac14c3dc2f0ba2fd is 50, key is test_row_0/C:col10/1733891201775/Put/seqid=0 2024-12-11T04:26:43,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741955_1131 (size=12301) 2024-12-11T04:26:43,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-11T04:26:43,996 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=529 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/c4af1f1bb2aa4983ac14c3dc2f0ba2fd 2024-12-11T04:26:44,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/b82e713f1c4842d5886f4459cf8542a0 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/b82e713f1c4842d5886f4459cf8542a0 2024-12-11T04:26:44,010 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/b82e713f1c4842d5886f4459cf8542a0, entries=150, sequenceid=529, filesize=12.0 K 2024-12-11T04:26:44,011 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/07074c694ed04a83bd2540da86a5ce5f as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/07074c694ed04a83bd2540da86a5ce5f 2024-12-11T04:26:44,014 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:44,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891264013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:44,015 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:44,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891264014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:44,015 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:44,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891264014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:44,017 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/07074c694ed04a83bd2540da86a5ce5f, entries=150, sequenceid=529, filesize=12.0 K 2024-12-11T04:26:44,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/c4af1f1bb2aa4983ac14c3dc2f0ba2fd as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/c4af1f1bb2aa4983ac14c3dc2f0ba2fd 2024-12-11T04:26:44,024 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/c4af1f1bb2aa4983ac14c3dc2f0ba2fd, entries=150, sequenceid=529, filesize=12.0 K 2024-12-11T04:26:44,026 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for c8c23c02526ae28f7a94d562fbd47bb4 in 1317ms, sequenceid=529, compaction requested=false 2024-12-11T04:26:44,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2538): Flush status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:44,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 
2024-12-11T04:26:44,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-12-11T04:26:44,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=29 2024-12-11T04:26:44,029 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=29, resume processing ppid=28 2024-12-11T04:26:44,029 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, ppid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4720 sec 2024-12-11T04:26:44,031 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees in 1.4860 sec 2024-12-11T04:26:44,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on c8c23c02526ae28f7a94d562fbd47bb4 2024-12-11T04:26:44,641 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c8c23c02526ae28f7a94d562fbd47bb4 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-11T04:26:44,642 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=A 2024-12-11T04:26:44,642 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:44,642 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=B 2024-12-11T04:26:44,642 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:44,642 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=C 2024-12-11T04:26:44,642 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:44,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-11T04:26:44,654 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 28 completed 2024-12-11T04:26:44,654 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/3d9e300d3a2b43fa964d35561cff1032 is 50, key is test_row_0/A:col10/1733891202900/Put/seqid=0 2024-12-11T04:26:44,655 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:26:44,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees 2024-12-11T04:26:44,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-11T04:26:44,658 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=30, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:26:44,659 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:26:44,659 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:26:44,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741956_1132 (size=12301) 2024-12-11T04:26:44,687 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:44,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891264685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:44,688 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:44,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891264686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:44,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-11T04:26:44,790 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:44,790 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:44,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891264789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:44,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891264789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:44,811 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:44,812 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-12-11T04:26:44,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:44,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. as already flushing 2024-12-11T04:26:44,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:44,812 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:44,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:44,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:44,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-11T04:26:44,965 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:44,965 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-12-11T04:26:44,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:44,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. as already flushing 2024-12-11T04:26:44,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:44,966 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:26:44,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:44,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:44,991 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:44,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891264991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:44,993 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:44,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891264992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:45,018 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:45,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39136 deadline: 1733891265017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:45,019 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:45,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39120 deadline: 1733891265017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:45,025 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:45,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39154 deadline: 1733891265022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:45,067 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=543 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/3d9e300d3a2b43fa964d35561cff1032 2024-12-11T04:26:45,076 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/1291dcb28637487e823be0b88890650c is 50, key is test_row_0/B:col10/1733891202900/Put/seqid=0 2024-12-11T04:26:45,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741957_1133 (size=12301) 2024-12-11T04:26:45,086 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=543 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/1291dcb28637487e823be0b88890650c 2024-12-11T04:26:45,095 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/f5a0d1a803a04b93b396e9bc5af632b1 is 50, key is test_row_0/C:col10/1733891202900/Put/seqid=0 2024-12-11T04:26:45,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741958_1134 (size=12301) 2024-12-11T04:26:45,118 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:45,119 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-12-11T04:26:45,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 
2024-12-11T04:26:45,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. as already flushing 2024-12-11T04:26:45,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:45,119 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:45,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:26:45,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:45,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-11T04:26:45,271 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:45,272 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-12-11T04:26:45,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 
2024-12-11T04:26:45,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. as already flushing 2024-12-11T04:26:45,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:45,272 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:45,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:26:45,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:45,293 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:45,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891265293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:45,295 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:45,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891265294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:45,424 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:45,425 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-12-11T04:26:45,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 
2024-12-11T04:26:45,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. as already flushing 2024-12-11T04:26:45,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:45,425 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:45,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:26:45,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:26:45,499 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=543 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/f5a0d1a803a04b93b396e9bc5af632b1 2024-12-11T04:26:45,505 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/3d9e300d3a2b43fa964d35561cff1032 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/3d9e300d3a2b43fa964d35561cff1032 2024-12-11T04:26:45,510 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/3d9e300d3a2b43fa964d35561cff1032, entries=150, sequenceid=543, filesize=12.0 K 2024-12-11T04:26:45,512 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/1291dcb28637487e823be0b88890650c as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/1291dcb28637487e823be0b88890650c 2024-12-11T04:26:45,517 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/1291dcb28637487e823be0b88890650c, entries=150, sequenceid=543, filesize=12.0 K 2024-12-11T04:26:45,518 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/f5a0d1a803a04b93b396e9bc5af632b1 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/f5a0d1a803a04b93b396e9bc5af632b1 2024-12-11T04:26:45,523 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/f5a0d1a803a04b93b396e9bc5af632b1, entries=150, sequenceid=543, filesize=12.0 K 2024-12-11T04:26:45,524 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for c8c23c02526ae28f7a94d562fbd47bb4 in 883ms, sequenceid=543, compaction requested=true 2024-12-11T04:26:45,524 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:45,524 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:26:45,524 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c8c23c02526ae28f7a94d562fbd47bb4:A, priority=-2147483648, 
current under compaction store size is 1 2024-12-11T04:26:45,525 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:45,525 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c8c23c02526ae28f7a94d562fbd47bb4:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:26:45,525 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:45,525 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c8c23c02526ae28f7a94d562fbd47bb4:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:26:45,525 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:26:45,525 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:26:45,525 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38163 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:26:45,526 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): c8c23c02526ae28f7a94d562fbd47bb4/A is initiating minor compaction (all files) 2024-12-11T04:26:45,526 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c8c23c02526ae28f7a94d562fbd47bb4/A in TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:45,526 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/c6a68583acaf4af5941589ddcdc5f2d1, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/b82e713f1c4842d5886f4459cf8542a0, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/3d9e300d3a2b43fa964d35561cff1032] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp, totalSize=37.3 K 2024-12-11T04:26:45,526 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38163 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:26:45,526 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): c8c23c02526ae28f7a94d562fbd47bb4/B is initiating minor compaction (all files) 2024-12-11T04:26:45,526 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c8c23c02526ae28f7a94d562fbd47bb4/B in TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 
2024-12-11T04:26:45,526 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/534dabca661a4477a8ca03c8ed11b6c8, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/07074c694ed04a83bd2540da86a5ce5f, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/1291dcb28637487e823be0b88890650c] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp, totalSize=37.3 K 2024-12-11T04:26:45,526 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting c6a68583acaf4af5941589ddcdc5f2d1, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=502, earliestPutTs=1733891201627 2024-12-11T04:26:45,527 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 534dabca661a4477a8ca03c8ed11b6c8, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=502, earliestPutTs=1733891201627 2024-12-11T04:26:45,527 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting b82e713f1c4842d5886f4459cf8542a0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=529, earliestPutTs=1733891201760 2024-12-11T04:26:45,527 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 07074c694ed04a83bd2540da86a5ce5f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=529, earliestPutTs=1733891201760 2024-12-11T04:26:45,527 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3d9e300d3a2b43fa964d35561cff1032, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=543, earliestPutTs=1733891202900 2024-12-11T04:26:45,528 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 1291dcb28637487e823be0b88890650c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=543, earliestPutTs=1733891202900 2024-12-11T04:26:45,542 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c8c23c02526ae28f7a94d562fbd47bb4#A#compaction#120 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:26:45,543 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/e608188d63ce439d8854bb57f658243b is 50, key is test_row_0/A:col10/1733891202900/Put/seqid=0 2024-12-11T04:26:45,549 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c8c23c02526ae28f7a94d562fbd47bb4#B#compaction#121 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:26:45,550 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/1ae702987a3b4af48f76562042be1927 is 50, key is test_row_0/B:col10/1733891202900/Put/seqid=0 2024-12-11T04:26:45,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741959_1135 (size=13663) 2024-12-11T04:26:45,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741960_1136 (size=13663) 2024-12-11T04:26:45,559 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/e608188d63ce439d8854bb57f658243b as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/e608188d63ce439d8854bb57f658243b 2024-12-11T04:26:45,565 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c8c23c02526ae28f7a94d562fbd47bb4/A of c8c23c02526ae28f7a94d562fbd47bb4 into e608188d63ce439d8854bb57f658243b(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:26:45,565 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:45,565 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4., storeName=c8c23c02526ae28f7a94d562fbd47bb4/A, priority=13, startTime=1733891205524; duration=0sec 2024-12-11T04:26:45,565 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:26:45,565 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c8c23c02526ae28f7a94d562fbd47bb4:A 2024-12-11T04:26:45,565 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:26:45,566 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38163 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:26:45,566 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): c8c23c02526ae28f7a94d562fbd47bb4/C is initiating minor compaction (all files) 2024-12-11T04:26:45,567 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c8c23c02526ae28f7a94d562fbd47bb4/C in TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 
2024-12-11T04:26:45,567 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/616d903a6ff14478a079b9a783d5b0d3, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/c4af1f1bb2aa4983ac14c3dc2f0ba2fd, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/f5a0d1a803a04b93b396e9bc5af632b1] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp, totalSize=37.3 K 2024-12-11T04:26:45,567 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 616d903a6ff14478a079b9a783d5b0d3, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=502, earliestPutTs=1733891201627 2024-12-11T04:26:45,568 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting c4af1f1bb2aa4983ac14c3dc2f0ba2fd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=529, earliestPutTs=1733891201760 2024-12-11T04:26:45,568 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting f5a0d1a803a04b93b396e9bc5af632b1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=543, earliestPutTs=1733891202900 2024-12-11T04:26:45,578 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c8c23c02526ae28f7a94d562fbd47bb4#C#compaction#122 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:26:45,578 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:45,578 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-12-11T04:26:45,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 
2024-12-11T04:26:45,579 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2837): Flushing c8c23c02526ae28f7a94d562fbd47bb4 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-11T04:26:45,579 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/dd239d6f309f4a17b845506ef983ed71 is 50, key is test_row_0/C:col10/1733891202900/Put/seqid=0 2024-12-11T04:26:45,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=A 2024-12-11T04:26:45,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:45,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=B 2024-12-11T04:26:45,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:45,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=C 2024-12-11T04:26:45,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:45,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/8166d9ce2c8c4f22ac2f743f0904ff40 is 50, key is test_row_0/A:col10/1733891204685/Put/seqid=0 2024-12-11T04:26:45,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741961_1137 (size=13663) 2024-12-11T04:26:45,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741962_1138 (size=12301) 2024-12-11T04:26:45,595 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=568 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/8166d9ce2c8c4f22ac2f743f0904ff40 2024-12-11T04:26:45,598 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/dd239d6f309f4a17b845506ef983ed71 as 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/dd239d6f309f4a17b845506ef983ed71 2024-12-11T04:26:45,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/25e957843de64715bdfdc4c388890d9d is 50, key is test_row_0/B:col10/1733891204685/Put/seqid=0 2024-12-11T04:26:45,607 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c8c23c02526ae28f7a94d562fbd47bb4/C of c8c23c02526ae28f7a94d562fbd47bb4 into dd239d6f309f4a17b845506ef983ed71(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:26:45,607 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:45,607 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4., storeName=c8c23c02526ae28f7a94d562fbd47bb4/C, priority=13, startTime=1733891205525; duration=0sec 2024-12-11T04:26:45,608 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:45,608 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c8c23c02526ae28f7a94d562fbd47bb4:C 2024-12-11T04:26:45,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741963_1139 (size=12301) 2024-12-11T04:26:45,616 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=568 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/25e957843de64715bdfdc4c388890d9d 2024-12-11T04:26:45,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/7fbcfbad68ad45dc98a7ca23ca480379 is 50, key is test_row_0/C:col10/1733891204685/Put/seqid=0 2024-12-11T04:26:45,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741964_1140 (size=12301) 2024-12-11T04:26:45,643 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=568 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/7fbcfbad68ad45dc98a7ca23ca480379 2024-12-11T04:26:45,650 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/8166d9ce2c8c4f22ac2f743f0904ff40 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/8166d9ce2c8c4f22ac2f743f0904ff40 2024-12-11T04:26:45,656 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/8166d9ce2c8c4f22ac2f743f0904ff40, entries=150, sequenceid=568, filesize=12.0 K 2024-12-11T04:26:45,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/25e957843de64715bdfdc4c388890d9d as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/25e957843de64715bdfdc4c388890d9d 2024-12-11T04:26:45,663 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/25e957843de64715bdfdc4c388890d9d, entries=150, sequenceid=568, filesize=12.0 K 2024-12-11T04:26:45,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/7fbcfbad68ad45dc98a7ca23ca480379 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/7fbcfbad68ad45dc98a7ca23ca480379 2024-12-11T04:26:45,673 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/7fbcfbad68ad45dc98a7ca23ca480379, entries=150, sequenceid=568, filesize=12.0 K 2024-12-11T04:26:45,674 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=0 B/0 for c8c23c02526ae28f7a94d562fbd47bb4 in 95ms, sequenceid=568, compaction requested=false 2024-12-11T04:26:45,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2538): Flush status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:45,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on 
TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:45,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=31 2024-12-11T04:26:45,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=31 2024-12-11T04:26:45,679 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30 2024-12-11T04:26:45,679 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0170 sec 2024-12-11T04:26:45,681 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees in 1.0240 sec 2024-12-11T04:26:45,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-11T04:26:45,760 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 30 completed 2024-12-11T04:26:45,761 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:26:45,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=32, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees 2024-12-11T04:26:45,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-11T04:26:45,763 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=32, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:26:45,764 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=32, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:26:45,764 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:26:45,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on c8c23c02526ae28f7a94d562fbd47bb4 2024-12-11T04:26:45,805 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c8c23c02526ae28f7a94d562fbd47bb4 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-11T04:26:45,806 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=A 2024-12-11T04:26:45,807 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:45,807 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=B 2024-12-11T04:26:45,807 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): 
Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:45,807 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=C 2024-12-11T04:26:45,807 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:45,820 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/dc4e2a6e54f34f6b8f711c995638576a is 50, key is test_row_0/A:col10/1733891205800/Put/seqid=0 2024-12-11T04:26:45,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741965_1141 (size=12301) 2024-12-11T04:26:45,846 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:45,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891265844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:45,847 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:45,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891265844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:45,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-11T04:26:45,917 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:45,917 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-12-11T04:26:45,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:45,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. as already flushing 2024-12-11T04:26:45,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:45,918 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:45,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:45,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:45,949 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:45,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891265948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:45,950 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:45,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891265950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:45,964 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/1ae702987a3b4af48f76562042be1927 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/1ae702987a3b4af48f76562042be1927 2024-12-11T04:26:45,971 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c8c23c02526ae28f7a94d562fbd47bb4/B of c8c23c02526ae28f7a94d562fbd47bb4 into 1ae702987a3b4af48f76562042be1927(size=13.3 K), total size for store is 25.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:26:45,971 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:45,971 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4., storeName=c8c23c02526ae28f7a94d562fbd47bb4/B, priority=13, startTime=1733891205525; duration=0sec 2024-12-11T04:26:45,971 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:45,971 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c8c23c02526ae28f7a94d562fbd47bb4:B 2024-12-11T04:26:46,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-11T04:26:46,071 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:46,072 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-12-11T04:26:46,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 
2024-12-11T04:26:46,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. as already flushing 2024-12-11T04:26:46,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:46,072 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:46,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:26:46,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:46,152 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:46,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891266152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:46,152 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:46,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891266152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:46,225 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:46,225 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-12-11T04:26:46,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 
2024-12-11T04:26:46,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. as already flushing 2024-12-11T04:26:46,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:46,226 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:46,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:26:46,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:26:46,227 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=581 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/dc4e2a6e54f34f6b8f711c995638576a 2024-12-11T04:26:46,238 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/06c90d7a3bca4a88bc690645bb22ae75 is 50, key is test_row_0/B:col10/1733891205800/Put/seqid=0 2024-12-11T04:26:46,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741966_1142 (size=12301) 2024-12-11T04:26:46,259 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=581 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/06c90d7a3bca4a88bc690645bb22ae75 2024-12-11T04:26:46,269 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/5777c4e883af4cb4921738538fe7bfd2 is 50, key is test_row_0/C:col10/1733891205800/Put/seqid=0 2024-12-11T04:26:46,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741967_1143 (size=12301) 2024-12-11T04:26:46,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-11T04:26:46,378 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:46,378 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-12-11T04:26:46,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:46,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. as already flushing 2024-12-11T04:26:46,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:46,379 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:46,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:46,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:46,454 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:46,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39146 deadline: 1733891266454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:46,455 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:46,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:39108 deadline: 1733891266455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:46,531 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:46,531 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-12-11T04:26:46,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 
2024-12-11T04:26:46,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. as already flushing 2024-12-11T04:26:46,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:46,532 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:46,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:26:46,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:26:46,605 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x12a1285d to 127.0.0.1:50078 2024-12-11T04:26:46,605 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x09ed28bb to 127.0.0.1:50078 2024-12-11T04:26:46,605 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:26:46,605 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:26:46,607 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x353bc462 to 127.0.0.1:50078 2024-12-11T04:26:46,607 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:26:46,611 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x47fe2fa7 to 127.0.0.1:50078 2024-12-11T04:26:46,611 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:26:46,684 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:46,684 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-12-11T04:26:46,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:46,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. as already flushing 2024-12-11T04:26:46,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:46,685 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:26:46,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:46,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:46,692 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=581 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/5777c4e883af4cb4921738538fe7bfd2 2024-12-11T04:26:46,697 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/dc4e2a6e54f34f6b8f711c995638576a as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/dc4e2a6e54f34f6b8f711c995638576a 2024-12-11T04:26:46,702 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/dc4e2a6e54f34f6b8f711c995638576a, entries=150, sequenceid=581, filesize=12.0 K 2024-12-11T04:26:46,702 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/06c90d7a3bca4a88bc690645bb22ae75 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/06c90d7a3bca4a88bc690645bb22ae75 2024-12-11T04:26:46,706 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/06c90d7a3bca4a88bc690645bb22ae75, entries=150, sequenceid=581, filesize=12.0 K 2024-12-11T04:26:46,707 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/5777c4e883af4cb4921738538fe7bfd2 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/5777c4e883af4cb4921738538fe7bfd2 2024-12-11T04:26:46,712 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/5777c4e883af4cb4921738538fe7bfd2, entries=150, sequenceid=581, filesize=12.0 K 2024-12-11T04:26:46,713 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for c8c23c02526ae28f7a94d562fbd47bb4 in 908ms, sequenceid=581, compaction requested=true 2024-12-11T04:26:46,714 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:46,714 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 
3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:26:46,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c8c23c02526ae28f7a94d562fbd47bb4:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:26:46,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:46,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c8c23c02526ae28f7a94d562fbd47bb4:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:26:46,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:26:46,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c8c23c02526ae28f7a94d562fbd47bb4:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:26:46,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:26:46,714 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:26:46,715 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38265 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:26:46,715 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): c8c23c02526ae28f7a94d562fbd47bb4/A is initiating minor compaction (all files) 2024-12-11T04:26:46,715 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c8c23c02526ae28f7a94d562fbd47bb4/A in TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 
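The "Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking" lines above (and the parallel B/C selections that follow) reflect the stock store-file thresholds: a minor compaction becomes eligible at 3 files and writes block at 16. A hedged sketch of the relevant settings and of requesting a compaction explicitly; the property names are the standard hbase-site.xml keys and are normally server-side settings, shown on a client Configuration here only to illustrate the names:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Thresholds behind "3 eligible, 16 blocking" above (these are the defaults;
    // in a real cluster they belong in the region servers' hbase-site.xml).
    conf.setInt("hbase.hstore.compaction.min", 3);      // min files for a minor compaction
    conf.setInt("hbase.hstore.blockingStoreFiles", 16); // writes block past this many files
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the region servers to compact the table; the ExploringCompactionPolicy
      // then picks the concrete file set, as logged above.
      admin.compact(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}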
2024-12-11T04:26:46,715 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38265 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:26:46,715 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/e608188d63ce439d8854bb57f658243b, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/8166d9ce2c8c4f22ac2f743f0904ff40, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/dc4e2a6e54f34f6b8f711c995638576a] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp, totalSize=37.4 K 2024-12-11T04:26:46,715 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): c8c23c02526ae28f7a94d562fbd47bb4/B is initiating minor compaction (all files) 2024-12-11T04:26:46,715 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c8c23c02526ae28f7a94d562fbd47bb4/B in TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:46,715 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/1ae702987a3b4af48f76562042be1927, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/25e957843de64715bdfdc4c388890d9d, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/06c90d7a3bca4a88bc690645bb22ae75] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp, totalSize=37.4 K 2024-12-11T04:26:46,716 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 1ae702987a3b4af48f76562042be1927, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=543, earliestPutTs=1733891202900 2024-12-11T04:26:46,716 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting e608188d63ce439d8854bb57f658243b, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=543, earliestPutTs=1733891202900 2024-12-11T04:26:46,716 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 25e957843de64715bdfdc4c388890d9d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=568, earliestPutTs=1733891204675 2024-12-11T04:26:46,716 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8166d9ce2c8c4f22ac2f743f0904ff40, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=568, earliestPutTs=1733891204675 2024-12-11T04:26:46,717 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 06c90d7a3bca4a88bc690645bb22ae75, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=581, earliestPutTs=1733891205800 2024-12-11T04:26:46,717 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting dc4e2a6e54f34f6b8f711c995638576a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=581, earliestPutTs=1733891205800 2024-12-11T04:26:46,725 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c8c23c02526ae28f7a94d562fbd47bb4#B#compaction#129 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:26:46,726 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/f201f7eec9cc4463a1dd9239163bbd54 is 50, key is test_row_0/B:col10/1733891205800/Put/seqid=0 2024-12-11T04:26:46,726 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c8c23c02526ae28f7a94d562fbd47bb4#A#compaction#130 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:26:46,727 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/e5b45c171894447ea9904224777e8247 is 50, key is test_row_0/A:col10/1733891205800/Put/seqid=0 2024-12-11T04:26:46,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741968_1144 (size=13765) 2024-12-11T04:26:46,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741969_1145 (size=13765) 2024-12-11T04:26:46,735 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/f201f7eec9cc4463a1dd9239163bbd54 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/f201f7eec9cc4463a1dd9239163bbd54 2024-12-11T04:26:46,739 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c8c23c02526ae28f7a94d562fbd47bb4/B of c8c23c02526ae28f7a94d562fbd47bb4 into f201f7eec9cc4463a1dd9239163bbd54(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
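The HFileWriterImpl lines above ("key is test_row_0/A:col10/...", "key is test_row_0/B:col10/...") show the cell shape this test writes: row test_row_0, column families A/B/C, qualifier col10, with the same value written to every family in one mutation. A minimal sketch of a write with that shape, illustrative only; the real writers live in AcidGuaranteesTestTool:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // One cell per family, matching the keys seen in the flush/compaction logs:
      // test_row_0/A:col10, test_row_0/B:col10, test_row_0/C:col10.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      for (String family : new String[] {"A", "B", "C"}) {
        put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      }
      table.put(put); // a single Put is applied atomically across the row's families
    }
  }
}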
2024-12-11T04:26:46,740 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:46,740 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4., storeName=c8c23c02526ae28f7a94d562fbd47bb4/B, priority=13, startTime=1733891206714; duration=0sec 2024-12-11T04:26:46,740 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:26:46,740 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c8c23c02526ae28f7a94d562fbd47bb4:B 2024-12-11T04:26:46,740 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:26:46,741 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38265 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:26:46,741 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): c8c23c02526ae28f7a94d562fbd47bb4/C is initiating minor compaction (all files) 2024-12-11T04:26:46,741 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c8c23c02526ae28f7a94d562fbd47bb4/C in TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:46,741 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/dd239d6f309f4a17b845506ef983ed71, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/7fbcfbad68ad45dc98a7ca23ca480379, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/5777c4e883af4cb4921738538fe7bfd2] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp, totalSize=37.4 K 2024-12-11T04:26:46,741 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting dd239d6f309f4a17b845506ef983ed71, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=543, earliestPutTs=1733891202900 2024-12-11T04:26:46,741 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 7fbcfbad68ad45dc98a7ca23ca480379, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=568, earliestPutTs=1733891204675 2024-12-11T04:26:46,742 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 5777c4e883af4cb4921738538fe7bfd2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=581, earliestPutTs=1733891205800 2024-12-11T04:26:46,748 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
c8c23c02526ae28f7a94d562fbd47bb4#C#compaction#131 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:26:46,749 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/6ce53b2ea98147a0ad112caf0547788f is 50, key is test_row_0/C:col10/1733891205800/Put/seqid=0 2024-12-11T04:26:46,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741970_1146 (size=13765) 2024-12-11T04:26:46,837 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:46,837 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-12-11T04:26:46,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:46,838 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2837): Flushing c8c23c02526ae28f7a94d562fbd47bb4 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-11T04:26:46,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=A 2024-12-11T04:26:46,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:46,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=B 2024-12-11T04:26:46,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:46,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=C 2024-12-11T04:26:46,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:46,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/f56c5fe0c5e0427882fc07511b0069fa is 50, key is test_row_0/A:col10/1733891205835/Put/seqid=0 2024-12-11T04:26:46,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741971_1147 (size=12301) 2024-12-11T04:26:46,866 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-11T04:26:46,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on c8c23c02526ae28f7a94d562fbd47bb4 2024-12-11T04:26:46,958 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. as already flushing 2024-12-11T04:26:46,958 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x18cb251d to 127.0.0.1:50078 2024-12-11T04:26:46,958 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:26:46,961 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x45b55c24 to 127.0.0.1:50078 2024-12-11T04:26:46,961 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:26:47,029 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1f2091cc to 127.0.0.1:50078 2024-12-11T04:26:47,029 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:26:47,038 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0e52b42a to 127.0.0.1:50078 2024-12-11T04:26:47,038 DEBUG [Thread-157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:26:47,038 DEBUG [Thread-151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x09bd0964 to 127.0.0.1:50078 2024-12-11T04:26:47,038 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:26:47,138 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/e5b45c171894447ea9904224777e8247 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/e5b45c171894447ea9904224777e8247 2024-12-11T04:26:47,144 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c8c23c02526ae28f7a94d562fbd47bb4/A of c8c23c02526ae28f7a94d562fbd47bb4 into e5b45c171894447ea9904224777e8247(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
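The "Committing .../.tmp/A/... as .../A/..." lines above record the usual two-step store-file commit: the flusher/compactor writes the new HFile into the region's .tmp directory and then moves it into the family directory with a single rename, so readers never see a partially written file. A simplified illustration of that pattern with the plain Hadoop FileSystem API; this is not HBase's actual HRegionFileSystem code, and the paths are copied from this log:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpCommitSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:43317"); // NameNode seen in this log
    FileSystem fs = FileSystem.get(conf);
    String region = "/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5"
        + "/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4";
    Path tmp = new Path(region + "/.tmp/A/e5b45c171894447ea9904224777e8247");
    Path dst = new Path(region + "/A/e5b45c171894447ea9904224777e8247");
    // The new HFile only becomes visible to the store once this rename succeeds.
    if (!fs.rename(tmp, dst)) {
      throw new IOException("commit failed for " + tmp);
    }
  }
}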
2024-12-11T04:26:47,144 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:47,144 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4., storeName=c8c23c02526ae28f7a94d562fbd47bb4/A, priority=13, startTime=1733891206714; duration=0sec 2024-12-11T04:26:47,145 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:47,145 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c8c23c02526ae28f7a94d562fbd47bb4:A 2024-12-11T04:26:47,157 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/6ce53b2ea98147a0ad112caf0547788f as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/6ce53b2ea98147a0ad112caf0547788f 2024-12-11T04:26:47,162 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c8c23c02526ae28f7a94d562fbd47bb4/C of c8c23c02526ae28f7a94d562fbd47bb4 into 6ce53b2ea98147a0ad112caf0547788f(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:26:47,162 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:47,162 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4., storeName=c8c23c02526ae28f7a94d562fbd47bb4/C, priority=13, startTime=1733891206714; duration=0sec 2024-12-11T04:26:47,162 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:47,162 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c8c23c02526ae28f7a94d562fbd47bb4:C 2024-12-11T04:26:47,246 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=608 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/f56c5fe0c5e0427882fc07511b0069fa 2024-12-11T04:26:47,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/b426ba567d67497da879de1f7c43c8ef is 50, key is test_row_0/B:col10/1733891205835/Put/seqid=0 2024-12-11T04:26:47,258 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741972_1148 (size=12301) 2024-12-11T04:26:47,659 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=608 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/b426ba567d67497da879de1f7c43c8ef 2024-12-11T04:26:47,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/d1cff00894b24c36900ac2f06dfd27b9 is 50, key is test_row_0/C:col10/1733891205835/Put/seqid=0 2024-12-11T04:26:47,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741973_1149 (size=12301) 2024-12-11T04:26:47,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-11T04:26:48,071 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=608 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/d1cff00894b24c36900ac2f06dfd27b9 2024-12-11T04:26:48,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/f56c5fe0c5e0427882fc07511b0069fa as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/f56c5fe0c5e0427882fc07511b0069fa 2024-12-11T04:26:48,080 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/f56c5fe0c5e0427882fc07511b0069fa, entries=150, sequenceid=608, filesize=12.0 K 2024-12-11T04:26:48,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/b426ba567d67497da879de1f7c43c8ef as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/b426ba567d67497da879de1f7c43c8ef 2024-12-11T04:26:48,084 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/b426ba567d67497da879de1f7c43c8ef, entries=150, sequenceid=608, filesize=12.0 K 2024-12-11T04:26:48,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/d1cff00894b24c36900ac2f06dfd27b9 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/d1cff00894b24c36900ac2f06dfd27b9 2024-12-11T04:26:48,089 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/d1cff00894b24c36900ac2f06dfd27b9, entries=150, sequenceid=608, filesize=12.0 K 2024-12-11T04:26:48,090 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=33.54 KB/34350 for c8c23c02526ae28f7a94d562fbd47bb4 in 1252ms, sequenceid=608, compaction requested=false 2024-12-11T04:26:48,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2538): Flush status journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:48,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:48,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=33 2024-12-11T04:26:48,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=33 2024-12-11T04:26:48,092 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32 2024-12-11T04:26:48,093 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3270 sec 2024-12-11T04:26:48,094 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees in 2.3320 sec 2024-12-11T04:26:48,617 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-11T04:26:49,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-11T04:26:49,868 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 32 completed 2024-12-11T04:26:49,868 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-11T04:26:49,868 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 89 2024-12-11T04:26:49,868 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 88 2024-12-11T04:26:49,868 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 110 2024-12-11T04:26:49,868 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 100 2024-12-11T04:26:49,868 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 92 2024-12-11T04:26:49,868 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-11T04:26:49,868 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5799 2024-12-11T04:26:49,868 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5809 2024-12-11T04:26:49,868 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-11T04:26:49,868 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2587 2024-12-11T04:26:49,868 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7761 rows 2024-12-11T04:26:49,868 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2591 2024-12-11T04:26:49,868 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7772 rows 2024-12-11T04:26:49,868 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-11T04:26:49,868 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6fcb5f29 to 127.0.0.1:50078 2024-12-11T04:26:49,868 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:26:49,871 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-11T04:26:49,877 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-11T04:26:49,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=34, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-11T04:26:49,884 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733891209884"}]},"ts":"1733891209884"} 2024-12-11T04:26:49,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-12-11T04:26:49,885 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-11T04:26:49,888 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-11T04:26:49,889 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-11T04:26:49,893 INFO [PEWorker-3 
{}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=c8c23c02526ae28f7a94d562fbd47bb4, UNASSIGN}] 2024-12-11T04:26:49,894 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=c8c23c02526ae28f7a94d562fbd47bb4, UNASSIGN 2024-12-11T04:26:49,894 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=c8c23c02526ae28f7a94d562fbd47bb4, regionState=CLOSING, regionLocation=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:49,895 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T04:26:49,896 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=37, ppid=36, state=RUNNABLE; CloseRegionProcedure c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267}] 2024-12-11T04:26:49,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-12-11T04:26:50,050 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:50,052 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] handler.UnassignRegionHandler(124): Close c8c23c02526ae28f7a94d562fbd47bb4 2024-12-11T04:26:50,052 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-11T04:26:50,053 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1681): Closing c8c23c02526ae28f7a94d562fbd47bb4, disabling compactions & flushes 2024-12-11T04:26:50,053 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:50,053 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:50,053 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. after waiting 0 ms 2024-12-11T04:26:50,053 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 
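The DisableTableProcedure above (pid=34) and its CloseTableRegionsProcedure / TransitRegionStateProcedure / CloseRegionProcedure children are the server-side half of an ordinary Admin.disableTable call made by the test during teardown. A minimal client-side sketch of that teardown, not the test's actual code:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      if (admin.isTableEnabled(table)) {
        // Blocks until the DisableTableProcedure (pid=34 above) finishes: regions are
        // closed (flushing any remaining memstore) and the table is marked DISABLED
        // in hbase:meta.
        admin.disableTable(table);
      }
      // Once disabled, the table could be dropped as part of cleanup:
      // admin.deleteTable(table);
    }
  }
}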
2024-12-11T04:26:50,053 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(2837): Flushing c8c23c02526ae28f7a94d562fbd47bb4 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-11T04:26:50,053 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=A 2024-12-11T04:26:50,054 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:50,054 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=B 2024-12-11T04:26:50,054 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:50,054 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c8c23c02526ae28f7a94d562fbd47bb4, store=C 2024-12-11T04:26:50,054 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:50,059 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/5520a0d1eb7443829d8f2527cb427e81 is 50, key is test_row_0/A:col10/1733891207028/Put/seqid=0 2024-12-11T04:26:50,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741974_1150 (size=9857) 2024-12-11T04:26:50,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-12-11T04:26:50,464 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=618 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/5520a0d1eb7443829d8f2527cb427e81 2024-12-11T04:26:50,472 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/207aa72b11324805afff50610f7b9772 is 50, key is test_row_0/B:col10/1733891207028/Put/seqid=0 2024-12-11T04:26:50,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741975_1151 (size=9857) 2024-12-11T04:26:50,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-12-11T04:26:50,876 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 
{event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=618 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/207aa72b11324805afff50610f7b9772 2024-12-11T04:26:50,885 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/f1efb52157954e429cc2c364b1bc181a is 50, key is test_row_0/C:col10/1733891207028/Put/seqid=0 2024-12-11T04:26:50,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741976_1152 (size=9857) 2024-12-11T04:26:50,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-12-11T04:26:51,290 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=618 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/f1efb52157954e429cc2c364b1bc181a 2024-12-11T04:26:51,295 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/A/5520a0d1eb7443829d8f2527cb427e81 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/5520a0d1eb7443829d8f2527cb427e81 2024-12-11T04:26:51,299 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/5520a0d1eb7443829d8f2527cb427e81, entries=100, sequenceid=618, filesize=9.6 K 2024-12-11T04:26:51,300 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/B/207aa72b11324805afff50610f7b9772 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/207aa72b11324805afff50610f7b9772 2024-12-11T04:26:51,304 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/207aa72b11324805afff50610f7b9772, entries=100, sequenceid=618, filesize=9.6 K 2024-12-11T04:26:51,304 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/.tmp/C/f1efb52157954e429cc2c364b1bc181a as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/f1efb52157954e429cc2c364b1bc181a 2024-12-11T04:26:51,308 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/f1efb52157954e429cc2c364b1bc181a, entries=100, sequenceid=618, filesize=9.6 K 2024-12-11T04:26:51,309 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for c8c23c02526ae28f7a94d562fbd47bb4 in 1256ms, sequenceid=618, compaction requested=true 2024-12-11T04:26:51,310 DEBUG [StoreCloser-TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/0e6894392db447e6bcd16ce828576b3c, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/2a84c15c354c47f480d3dbe67c4b3cc5, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/5a4b4f64059c478ba193dbd554649405, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/88bbb5debb364f7b9d47f3def2fd0b6d, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/9c183b20da19439aa33729c6f605e9eb, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/5fc1999774654217bb649da43963aea4, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/326763d2c70e4256abf5ecbbcec31a21, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/0cf6791a0f3342a8b42c9ec46bb31fd5, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/044e916043df46b4ae5086ac84c83527, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/e95926f5d5884e9c9cecd155b438ae0e, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/afdbaaa4e64e4460a445f2fd084b4262, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/d235572b7dd745e49005a63f2c4d7695, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/375b26247cc44950ac96e74acdff4f11, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/11e46bba6c954fdf800428e022eec11a, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/145d047a69dc421ca25c809357504060, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/9937fd85f70e437786f15cc4a0641ef5, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/5b5b653c06784bd8b5371dd100dc2905, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/ea53ae604e834bcbae3baa8eec4c6cba, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/3ac4ae70e1844f528c7c3662f9ddbc68, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/3a0d793ccff94af197e7b73363376390, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/97cd757e64b442f48677d0f24449ad75, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/22b16a59a935484685592d9b1c734513, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/62f28cd2a77b46fc9ff95cb0acbda299, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/3546ec8a45fd4d92ba70990e363e0bda, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/ac229882ab8548459e7def2c7347b63d, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/5d32d2af5b324167b6459b5c85a66eb0, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/f053ff63bc734d38a23eddf1ed5933b7, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/370e5e22c8c449a6ac990be06388e217, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/8881ba1ba42048dfa841b9b5638a8ddd, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/949d3281201b418b88da9e2e060955ac, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/ed1bdad3af904ad7bfdbf4ff98f29ad1, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/47f259cd0cce4ac4b66fddc98f4d804a, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/611537c2b66944a3a46a087a5189caf3, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/4ea1755ea8cd48ce9944780bfe4e13f7, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/7062aa885bec45ba8aa4ca029cd52b8c, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/e0940d4f79b54830986c57e7c4e0f601, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/c6a68583acaf4af5941589ddcdc5f2d1, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/a79b762507144a72b10569ee21a33367, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/b82e713f1c4842d5886f4459cf8542a0, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/e608188d63ce439d8854bb57f658243b, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/3d9e300d3a2b43fa964d35561cff1032, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/8166d9ce2c8c4f22ac2f743f0904ff40, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/dc4e2a6e54f34f6b8f711c995638576a] to archive 2024-12-11T04:26:51,313 DEBUG [StoreCloser-TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
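The HFileArchiver entries that follow move every replaced store file from the region's data directory to a mirror-image path under archive/, preserving the table/region/family layout so cleaner chores can remove the files later once nothing references them. A tiny sketch of the path mapping visible in these lines, for illustration only:

import org.apache.hadoop.fs.Path;

public class ArchivePathSketch {
  public static void main(String[] args) {
    String root = "/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5";
    // A store file path as logged above...
    Path storeFile = new Path(root
        + "/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4"
        + "/A/0e6894392db447e6bcd16ce828576b3c");
    // ...is archived to the same relative location under <root>/archive/.
    Path archived = new Path(storeFile.toString()
        .replaceFirst(root + "/data/", root + "/archive/data/"));
    System.out.println(storeFile + " -> " + archived);
  }
}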
2024-12-11T04:26:51,321 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/0e6894392db447e6bcd16ce828576b3c to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/0e6894392db447e6bcd16ce828576b3c 2024-12-11T04:26:51,321 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/5a4b4f64059c478ba193dbd554649405 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/5a4b4f64059c478ba193dbd554649405 2024-12-11T04:26:51,321 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/88bbb5debb364f7b9d47f3def2fd0b6d to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/88bbb5debb364f7b9d47f3def2fd0b6d 2024-12-11T04:26:51,321 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/0cf6791a0f3342a8b42c9ec46bb31fd5 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/0cf6791a0f3342a8b42c9ec46bb31fd5 2024-12-11T04:26:51,321 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/9c183b20da19439aa33729c6f605e9eb to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/9c183b20da19439aa33729c6f605e9eb 2024-12-11T04:26:51,321 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/2a84c15c354c47f480d3dbe67c4b3cc5 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/2a84c15c354c47f480d3dbe67c4b3cc5 2024-12-11T04:26:51,322 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/326763d2c70e4256abf5ecbbcec31a21 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/326763d2c70e4256abf5ecbbcec31a21 2024-12-11T04:26:51,322 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/5fc1999774654217bb649da43963aea4 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/5fc1999774654217bb649da43963aea4 2024-12-11T04:26:51,325 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/044e916043df46b4ae5086ac84c83527 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/044e916043df46b4ae5086ac84c83527 2024-12-11T04:26:51,325 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/e95926f5d5884e9c9cecd155b438ae0e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/e95926f5d5884e9c9cecd155b438ae0e 2024-12-11T04:26:51,325 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/9937fd85f70e437786f15cc4a0641ef5 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/9937fd85f70e437786f15cc4a0641ef5 2024-12-11T04:26:51,325 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/afdbaaa4e64e4460a445f2fd084b4262 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/afdbaaa4e64e4460a445f2fd084b4262 2024-12-11T04:26:51,325 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/d235572b7dd745e49005a63f2c4d7695 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/d235572b7dd745e49005a63f2c4d7695 2024-12-11T04:26:51,326 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/145d047a69dc421ca25c809357504060 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/145d047a69dc421ca25c809357504060 2024-12-11T04:26:51,326 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/375b26247cc44950ac96e74acdff4f11 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/375b26247cc44950ac96e74acdff4f11 2024-12-11T04:26:51,328 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/3a0d793ccff94af197e7b73363376390 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/3a0d793ccff94af197e7b73363376390 2024-12-11T04:26:51,328 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/3ac4ae70e1844f528c7c3662f9ddbc68 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/3ac4ae70e1844f528c7c3662f9ddbc68 2024-12-11T04:26:51,328 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/5b5b653c06784bd8b5371dd100dc2905 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/5b5b653c06784bd8b5371dd100dc2905 2024-12-11T04:26:51,328 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/22b16a59a935484685592d9b1c734513 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/22b16a59a935484685592d9b1c734513 2024-12-11T04:26:51,328 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/ea53ae604e834bcbae3baa8eec4c6cba to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/ea53ae604e834bcbae3baa8eec4c6cba 2024-12-11T04:26:51,328 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/97cd757e64b442f48677d0f24449ad75 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/97cd757e64b442f48677d0f24449ad75 2024-12-11T04:26:51,329 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/62f28cd2a77b46fc9ff95cb0acbda299 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/62f28cd2a77b46fc9ff95cb0acbda299 2024-12-11T04:26:51,330 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/8881ba1ba42048dfa841b9b5638a8ddd to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/8881ba1ba42048dfa841b9b5638a8ddd 2024-12-11T04:26:51,331 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/ac229882ab8548459e7def2c7347b63d to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/ac229882ab8548459e7def2c7347b63d 2024-12-11T04:26:51,331 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/370e5e22c8c449a6ac990be06388e217 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/370e5e22c8c449a6ac990be06388e217 2024-12-11T04:26:51,331 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/3546ec8a45fd4d92ba70990e363e0bda to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/3546ec8a45fd4d92ba70990e363e0bda 2024-12-11T04:26:51,332 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/5d32d2af5b324167b6459b5c85a66eb0 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/5d32d2af5b324167b6459b5c85a66eb0 2024-12-11T04:26:51,332 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/f053ff63bc734d38a23eddf1ed5933b7 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/f053ff63bc734d38a23eddf1ed5933b7 2024-12-11T04:26:51,333 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/949d3281201b418b88da9e2e060955ac to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/949d3281201b418b88da9e2e060955ac 2024-12-11T04:26:51,333 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/11e46bba6c954fdf800428e022eec11a to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/11e46bba6c954fdf800428e022eec11a 2024-12-11T04:26:51,333 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/ed1bdad3af904ad7bfdbf4ff98f29ad1 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/ed1bdad3af904ad7bfdbf4ff98f29ad1 2024-12-11T04:26:51,333 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/47f259cd0cce4ac4b66fddc98f4d804a to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/47f259cd0cce4ac4b66fddc98f4d804a 2024-12-11T04:26:51,334 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/611537c2b66944a3a46a087a5189caf3 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/611537c2b66944a3a46a087a5189caf3 2024-12-11T04:26:51,334 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/4ea1755ea8cd48ce9944780bfe4e13f7 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/4ea1755ea8cd48ce9944780bfe4e13f7 2024-12-11T04:26:51,334 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/e0940d4f79b54830986c57e7c4e0f601 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/e0940d4f79b54830986c57e7c4e0f601 2024-12-11T04:26:51,335 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/7062aa885bec45ba8aa4ca029cd52b8c to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/7062aa885bec45ba8aa4ca029cd52b8c 2024-12-11T04:26:51,335 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/c6a68583acaf4af5941589ddcdc5f2d1 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/c6a68583acaf4af5941589ddcdc5f2d1 2024-12-11T04:26:51,335 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/e608188d63ce439d8854bb57f658243b to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/e608188d63ce439d8854bb57f658243b 2024-12-11T04:26:51,336 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/a79b762507144a72b10569ee21a33367 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/a79b762507144a72b10569ee21a33367 2024-12-11T04:26:51,336 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/b82e713f1c4842d5886f4459cf8542a0 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/b82e713f1c4842d5886f4459cf8542a0 2024-12-11T04:26:51,336 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/8166d9ce2c8c4f22ac2f743f0904ff40 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/8166d9ce2c8c4f22ac2f743f0904ff40 2024-12-11T04:26:51,336 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/dc4e2a6e54f34f6b8f711c995638576a to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/dc4e2a6e54f34f6b8f711c995638576a 2024-12-11T04:26:51,337 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/3d9e300d3a2b43fa964d35561cff1032 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/3d9e300d3a2b43fa964d35561cff1032 2024-12-11T04:26:51,354 DEBUG [StoreCloser-TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/88b7fb6ad8eb4522981555a9c89f8fb2, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/d9a4aba5b75f4c7eb337238f37ee06cc, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/1715d09c8f6e4ec684e9d3a95b9539e1, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/14b170e24c1148b09d3c03d0b462c974, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/7d8c05e6ebdd48bbb0e62d690f655920, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/fa1c6b5eea8e45d4ba5534c373d232ca, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/88778e6523c641528a0a7d9393a390bd, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/159e6bcc07be443d816600856ad6b75e, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/7597a582fd33493493f9eddc7c6d9707, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/f2fc1de793564afe9f5ba84a05ec08a6, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/532c620f0c1d44ea805c9592e180e39e, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/e2a5b4b75cd64466ad60d1ae1149f8cc, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/6e5bd8a6d3e64f778ee2b7a18a7eccce, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/5907226f93a94c02978330194a5fa9cd, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/b96936c1ca5a4173a7557026da633d8b, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/2800ee67287c4385bf1029361bea29f5, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/33976c29cff548b18b4347c3d665e47f, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/e53f7586497648589b26ae9d3e582266, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/237c97a65cdc4664862af8026c09c323, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/b1b4b4c81a75432190f3c16d2f7201d2, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/ab165c507ec2465195239969fa29bfd2, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/5ee4adbb838649be85d1a1441c8b159c, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/8a2a5fb2967e454d908a9d0ada9441ca, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/ac87311c50d647a2b730e854e6d41afe, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/3bdf2925d66e45ff9c71811aa2f8ade1, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/e438cc89ec6f4d1baeca3e1c344fffa2, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/4515034585ea416faf60217a34da1b1e, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/171be08ec3cd4825b6fb59d93d2737ce, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/8f02a464fab34f33a9d295ad3168ebc1, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/5f152bc98570425d8b6ce8435dccd99c, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/46b15beb62a04dbfbf542714d7731bf3, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/bcb3061d98d94492a7aec954b044b1a0, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/11f4569bbc694ee09d9aa9279759f41a, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/98e39b59b6c643d288957f94c2a5c356, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/b52140832a89462f8720c41949635206, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/5eed8d8f7b654d60b49a474b773b51d7, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/534dabca661a4477a8ca03c8ed11b6c8, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/65f1620645df4b9eab61d7a67127e464, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/07074c694ed04a83bd2540da86a5ce5f, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/1ae702987a3b4af48f76562042be1927, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/1291dcb28637487e823be0b88890650c, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/25e957843de64715bdfdc4c388890d9d, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/06c90d7a3bca4a88bc690645bb22ae75] to archive 2024-12-11T04:26:51,355 DEBUG [StoreCloser-TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-11T04:26:51,358 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/14b170e24c1148b09d3c03d0b462c974 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/14b170e24c1148b09d3c03d0b462c974 2024-12-11T04:26:51,358 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/d9a4aba5b75f4c7eb337238f37ee06cc to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/d9a4aba5b75f4c7eb337238f37ee06cc 2024-12-11T04:26:51,358 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/fa1c6b5eea8e45d4ba5534c373d232ca to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/fa1c6b5eea8e45d4ba5534c373d232ca 2024-12-11T04:26:51,359 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/88b7fb6ad8eb4522981555a9c89f8fb2 to 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/88b7fb6ad8eb4522981555a9c89f8fb2 2024-12-11T04:26:51,359 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/88778e6523c641528a0a7d9393a390bd to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/88778e6523c641528a0a7d9393a390bd 2024-12-11T04:26:51,360 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/159e6bcc07be443d816600856ad6b75e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/159e6bcc07be443d816600856ad6b75e 2024-12-11T04:26:51,360 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/1715d09c8f6e4ec684e9d3a95b9539e1 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/1715d09c8f6e4ec684e9d3a95b9539e1 2024-12-11T04:26:51,360 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/7597a582fd33493493f9eddc7c6d9707 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/7597a582fd33493493f9eddc7c6d9707 2024-12-11T04:26:51,362 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/532c620f0c1d44ea805c9592e180e39e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/532c620f0c1d44ea805c9592e180e39e 2024-12-11T04:26:51,362 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/e2a5b4b75cd64466ad60d1ae1149f8cc to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/e2a5b4b75cd64466ad60d1ae1149f8cc 2024-12-11T04:26:51,362 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/6e5bd8a6d3e64f778ee2b7a18a7eccce to 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/6e5bd8a6d3e64f778ee2b7a18a7eccce 2024-12-11T04:26:51,362 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/f2fc1de793564afe9f5ba84a05ec08a6 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/f2fc1de793564afe9f5ba84a05ec08a6 2024-12-11T04:26:51,363 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/5907226f93a94c02978330194a5fa9cd to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/5907226f93a94c02978330194a5fa9cd 2024-12-11T04:26:51,363 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/2800ee67287c4385bf1029361bea29f5 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/2800ee67287c4385bf1029361bea29f5 2024-12-11T04:26:51,363 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/b96936c1ca5a4173a7557026da633d8b to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/b96936c1ca5a4173a7557026da633d8b 2024-12-11T04:26:51,364 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/33976c29cff548b18b4347c3d665e47f to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/33976c29cff548b18b4347c3d665e47f 2024-12-11T04:26:51,365 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/7d8c05e6ebdd48bbb0e62d690f655920 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/7d8c05e6ebdd48bbb0e62d690f655920 2024-12-11T04:26:51,365 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/237c97a65cdc4664862af8026c09c323 to 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/237c97a65cdc4664862af8026c09c323 2024-12-11T04:26:51,367 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/ab165c507ec2465195239969fa29bfd2 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/ab165c507ec2465195239969fa29bfd2 2024-12-11T04:26:51,367 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/5ee4adbb838649be85d1a1441c8b159c to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/5ee4adbb838649be85d1a1441c8b159c 2024-12-11T04:26:51,367 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/e53f7586497648589b26ae9d3e582266 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/e53f7586497648589b26ae9d3e582266 2024-12-11T04:26:51,367 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/b1b4b4c81a75432190f3c16d2f7201d2 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/b1b4b4c81a75432190f3c16d2f7201d2 2024-12-11T04:26:51,367 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/8a2a5fb2967e454d908a9d0ada9441ca to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/8a2a5fb2967e454d908a9d0ada9441ca 2024-12-11T04:26:51,367 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/ac87311c50d647a2b730e854e6d41afe to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/ac87311c50d647a2b730e854e6d41afe 2024-12-11T04:26:51,367 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/e438cc89ec6f4d1baeca3e1c344fffa2 to 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/e438cc89ec6f4d1baeca3e1c344fffa2 2024-12-11T04:26:51,368 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/3bdf2925d66e45ff9c71811aa2f8ade1 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/3bdf2925d66e45ff9c71811aa2f8ade1 2024-12-11T04:26:51,369 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/171be08ec3cd4825b6fb59d93d2737ce to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/171be08ec3cd4825b6fb59d93d2737ce 2024-12-11T04:26:51,369 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/4515034585ea416faf60217a34da1b1e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/4515034585ea416faf60217a34da1b1e 2024-12-11T04:26:51,370 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/5f152bc98570425d8b6ce8435dccd99c to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/5f152bc98570425d8b6ce8435dccd99c 2024-12-11T04:26:51,371 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/11f4569bbc694ee09d9aa9279759f41a to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/11f4569bbc694ee09d9aa9279759f41a 2024-12-11T04:26:51,371 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/8f02a464fab34f33a9d295ad3168ebc1 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/8f02a464fab34f33a9d295ad3168ebc1 2024-12-11T04:26:51,371 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/bcb3061d98d94492a7aec954b044b1a0 to 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/bcb3061d98d94492a7aec954b044b1a0 2024-12-11T04:26:51,371 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/98e39b59b6c643d288957f94c2a5c356 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/98e39b59b6c643d288957f94c2a5c356 2024-12-11T04:26:51,372 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/5eed8d8f7b654d60b49a474b773b51d7 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/5eed8d8f7b654d60b49a474b773b51d7 2024-12-11T04:26:51,372 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/b52140832a89462f8720c41949635206 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/b52140832a89462f8720c41949635206 2024-12-11T04:26:51,373 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/534dabca661a4477a8ca03c8ed11b6c8 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/534dabca661a4477a8ca03c8ed11b6c8 2024-12-11T04:26:51,374 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/65f1620645df4b9eab61d7a67127e464 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/65f1620645df4b9eab61d7a67127e464 2024-12-11T04:26:51,374 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/1ae702987a3b4af48f76562042be1927 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/1ae702987a3b4af48f76562042be1927 2024-12-11T04:26:51,374 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/07074c694ed04a83bd2540da86a5ce5f to 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/07074c694ed04a83bd2540da86a5ce5f 2024-12-11T04:26:51,374 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/1291dcb28637487e823be0b88890650c to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/1291dcb28637487e823be0b88890650c 2024-12-11T04:26:51,374 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/06c90d7a3bca4a88bc690645bb22ae75 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/06c90d7a3bca4a88bc690645bb22ae75 2024-12-11T04:26:51,375 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/25e957843de64715bdfdc4c388890d9d to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/25e957843de64715bdfdc4c388890d9d 2024-12-11T04:26:51,381 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/46b15beb62a04dbfbf542714d7731bf3 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/46b15beb62a04dbfbf542714d7731bf3 2024-12-11T04:26:51,383 DEBUG [StoreCloser-TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/a5e752cdb82d41d59be5f043f543b27a, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/23bb06ca4ce944319eed3cb6b1eb57ab, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/6d94b2bf9d684549b09cf7ea261bbae1, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/64657ca9ff154d0aa4e46ba87875f2b9, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/9f1dc652c61b429d951260284ec02979, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/689c5752e9a8487dbc9e824cb82034f6, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/9f85eb2218094c0e86cd9e13a3b381c6, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/ffba4a8a19314d82853e7744cdf48037, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/317a3c352bdd4cc0b699b2cb57a1ed5b, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/71d7c9689fc1413f93ceba775234791d, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/9a6350ddeaef441a93fa71f41ea459eb, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/07caff6d6cb84bab9406fa236d7d7982, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/d59cd27e729b4510a43cde0b541f6316, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/e3daf39b4dd442f88a14e4cd0fafea1b, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/3fda5234be284a16a367612da304acd3, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/8ba2f9317d254ad6bf88fd24aec45c41, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/0c83aebe0f1547c0949bc7be6c843cad, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/ee899e2620ff4bbbbb16cc42db1c7dd0, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/cec8f467c8f3471885cfadc09dd073e5, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/98b4e14201844fa78b86fb315a918b75, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/5448e84d8098494688d4b6a8fd0fbd15, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/e5e2af57af4f4843aeb06204980fbe72, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/bcb5b0ed791c40298b8109d3b853d570, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/8d0e9c0e9e3a4674b2543b623bbcdd96, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/6b43b8dfb2574000a6028092a880cf7b, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/0c4fff1e9ce14f498eca8b0eb382abd2, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/6a933c73acfa41faaa1d609d1d2664f5, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/ea100281f3434bfbb20592f00b1dc13e, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/44ef559838f64cdda3a09b98ea5c8728, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/e72cb566d06440faab8b79819a1a31a4, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/cb0b40e5fc0842d5acf4f5e42912330c, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/930fb8c231494fe886774c0419ebb3a1, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/6359afd497e84fb2a12817d39fd1aac6, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/a7cb90db28f84d6b9c6f480327adc6dd, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/14fb264f38234e8eaab67cf3e50d4bdb, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/82fcf54a2dff4e50acdd2e8923c98733, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/616d903a6ff14478a079b9a783d5b0d3, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/cfad4bcb3aaa40a7ae35d8d7af82cc40, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/c4af1f1bb2aa4983ac14c3dc2f0ba2fd, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/dd239d6f309f4a17b845506ef983ed71, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/f5a0d1a803a04b93b396e9bc5af632b1, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/7fbcfbad68ad45dc98a7ca23ca480379, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/5777c4e883af4cb4921738538fe7bfd2] to archive 2024-12-11T04:26:51,384 DEBUG [StoreCloser-TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
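Once the StoreCloser has archived a column family, the moved HFiles can be enumerated directly under the archive/ tree on the same NameNode. A minimal sketch using the plain Hadoop FileSystem API (a standalone illustration, not part of the TestAcidGuarantees test code; the host, port, and paths are copied from the records above):

// ListArchivedHFiles.java - lists the archived HFiles of column family C for the closed region.
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListArchivedHFiles {
    public static void main(String[] args) throws Exception {
        // Connect to the mini-cluster NameNode referenced throughout the log.
        FileSystem fs = FileSystem.get(new URI("hdfs://localhost:43317"), new Configuration());
        // Archive directory of column family C for region c8c23c02526ae28f7a94d562fbd47bb4.
        Path archivedCf = new Path("/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5"
            + "/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C");
        for (FileStatus status : fs.listStatus(archivedCf)) {  // one entry per archived HFile
            System.out.println(status.getPath().getName() + "\t" + status.getLen());
        }
        fs.close();
    }
}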
2024-12-11T04:26:51,387 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/23bb06ca4ce944319eed3cb6b1eb57ab to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/23bb06ca4ce944319eed3cb6b1eb57ab 2024-12-11T04:26:51,387 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/64657ca9ff154d0aa4e46ba87875f2b9 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/64657ca9ff154d0aa4e46ba87875f2b9 2024-12-11T04:26:51,387 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/6d94b2bf9d684549b09cf7ea261bbae1 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/6d94b2bf9d684549b09cf7ea261bbae1 2024-12-11T04:26:51,387 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/9f85eb2218094c0e86cd9e13a3b381c6 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/9f85eb2218094c0e86cd9e13a3b381c6 2024-12-11T04:26:51,387 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/9f1dc652c61b429d951260284ec02979 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/9f1dc652c61b429d951260284ec02979 2024-12-11T04:26:51,388 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/689c5752e9a8487dbc9e824cb82034f6 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/689c5752e9a8487dbc9e824cb82034f6 2024-12-11T04:26:51,389 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/a5e752cdb82d41d59be5f043f543b27a to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/a5e752cdb82d41d59be5f043f543b27a 2024-12-11T04:26:51,389 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/317a3c352bdd4cc0b699b2cb57a1ed5b to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/317a3c352bdd4cc0b699b2cb57a1ed5b 2024-12-11T04:26:51,392 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/9a6350ddeaef441a93fa71f41ea459eb to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/9a6350ddeaef441a93fa71f41ea459eb 2024-12-11T04:26:51,392 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/07caff6d6cb84bab9406fa236d7d7982 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/07caff6d6cb84bab9406fa236d7d7982 2024-12-11T04:26:51,392 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/d59cd27e729b4510a43cde0b541f6316 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/d59cd27e729b4510a43cde0b541f6316 2024-12-11T04:26:51,392 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/e3daf39b4dd442f88a14e4cd0fafea1b to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/e3daf39b4dd442f88a14e4cd0fafea1b 2024-12-11T04:26:51,393 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/71d7c9689fc1413f93ceba775234791d to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/71d7c9689fc1413f93ceba775234791d 2024-12-11T04:26:51,394 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/8ba2f9317d254ad6bf88fd24aec45c41 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/8ba2f9317d254ad6bf88fd24aec45c41 2024-12-11T04:26:51,395 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/0c83aebe0f1547c0949bc7be6c843cad to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/0c83aebe0f1547c0949bc7be6c843cad 2024-12-11T04:26:51,395 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/3fda5234be284a16a367612da304acd3 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/3fda5234be284a16a367612da304acd3 2024-12-11T04:26:51,395 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/98b4e14201844fa78b86fb315a918b75 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/98b4e14201844fa78b86fb315a918b75 2024-12-11T04:26:51,395 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/cec8f467c8f3471885cfadc09dd073e5 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/cec8f467c8f3471885cfadc09dd073e5 2024-12-11T04:26:51,397 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/ee899e2620ff4bbbbb16cc42db1c7dd0 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/ee899e2620ff4bbbbb16cc42db1c7dd0 2024-12-11T04:26:51,397 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/5448e84d8098494688d4b6a8fd0fbd15 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/5448e84d8098494688d4b6a8fd0fbd15 2024-12-11T04:26:51,397 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/e5e2af57af4f4843aeb06204980fbe72 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/e5e2af57af4f4843aeb06204980fbe72 2024-12-11T04:26:51,398 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/ffba4a8a19314d82853e7744cdf48037 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/ffba4a8a19314d82853e7744cdf48037 2024-12-11T04:26:51,398 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/bcb5b0ed791c40298b8109d3b853d570 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/bcb5b0ed791c40298b8109d3b853d570 2024-12-11T04:26:51,398 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/6b43b8dfb2574000a6028092a880cf7b to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/6b43b8dfb2574000a6028092a880cf7b 2024-12-11T04:26:51,399 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/8d0e9c0e9e3a4674b2543b623bbcdd96 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/8d0e9c0e9e3a4674b2543b623bbcdd96 2024-12-11T04:26:51,401 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/0c4fff1e9ce14f498eca8b0eb382abd2 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/0c4fff1e9ce14f498eca8b0eb382abd2 2024-12-11T04:26:51,401 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/6a933c73acfa41faaa1d609d1d2664f5 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/6a933c73acfa41faaa1d609d1d2664f5 2024-12-11T04:26:51,402 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/44ef559838f64cdda3a09b98ea5c8728 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/44ef559838f64cdda3a09b98ea5c8728 2024-12-11T04:26:51,403 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/ea100281f3434bfbb20592f00b1dc13e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/ea100281f3434bfbb20592f00b1dc13e 2024-12-11T04:26:51,403 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/cb0b40e5fc0842d5acf4f5e42912330c to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/cb0b40e5fc0842d5acf4f5e42912330c 2024-12-11T04:26:51,403 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/e72cb566d06440faab8b79819a1a31a4 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/e72cb566d06440faab8b79819a1a31a4 2024-12-11T04:26:51,404 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/930fb8c231494fe886774c0419ebb3a1 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/930fb8c231494fe886774c0419ebb3a1 2024-12-11T04:26:51,405 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/6359afd497e84fb2a12817d39fd1aac6 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/6359afd497e84fb2a12817d39fd1aac6 2024-12-11T04:26:51,405 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/a7cb90db28f84d6b9c6f480327adc6dd to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/a7cb90db28f84d6b9c6f480327adc6dd 2024-12-11T04:26:51,406 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/c4af1f1bb2aa4983ac14c3dc2f0ba2fd to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/c4af1f1bb2aa4983ac14c3dc2f0ba2fd 2024-12-11T04:26:51,406 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/cfad4bcb3aaa40a7ae35d8d7af82cc40 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/cfad4bcb3aaa40a7ae35d8d7af82cc40 2024-12-11T04:26:51,406 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/82fcf54a2dff4e50acdd2e8923c98733 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/82fcf54a2dff4e50acdd2e8923c98733 2024-12-11T04:26:51,406 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/616d903a6ff14478a079b9a783d5b0d3 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/616d903a6ff14478a079b9a783d5b0d3 2024-12-11T04:26:51,406 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/14fb264f38234e8eaab67cf3e50d4bdb to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/14fb264f38234e8eaab67cf3e50d4bdb 2024-12-11T04:26:51,407 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/dd239d6f309f4a17b845506ef983ed71 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/dd239d6f309f4a17b845506ef983ed71 2024-12-11T04:26:51,408 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/7fbcfbad68ad45dc98a7ca23ca480379 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/7fbcfbad68ad45dc98a7ca23ca480379 2024-12-11T04:26:51,408 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/5777c4e883af4cb4921738538fe7bfd2 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/5777c4e883af4cb4921738538fe7bfd2 2024-12-11T04:26:51,409 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/f5a0d1a803a04b93b396e9bc5af632b1 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/f5a0d1a803a04b93b396e9bc5af632b1 2024-12-11T04:26:51,416 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/recovered.edits/621.seqid, newMaxSeqId=621, maxSeqId=1 2024-12-11T04:26:51,419 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4. 2024-12-11T04:26:51,419 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1635): Region close journal for c8c23c02526ae28f7a94d562fbd47bb4: 2024-12-11T04:26:51,421 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] handler.UnassignRegionHandler(170): Closed c8c23c02526ae28f7a94d562fbd47bb4 2024-12-11T04:26:51,422 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=c8c23c02526ae28f7a94d562fbd47bb4, regionState=CLOSED 2024-12-11T04:26:51,425 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=37, resume processing ppid=36 2024-12-11T04:26:51,425 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, ppid=36, state=SUCCESS; CloseRegionProcedure c8c23c02526ae28f7a94d562fbd47bb4, server=5f466b3719ec,39071,1733891180267 in 1.5270 sec 2024-12-11T04:26:51,426 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=36, resume processing ppid=35 2024-12-11T04:26:51,426 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, ppid=35, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=c8c23c02526ae28f7a94d562fbd47bb4, UNASSIGN in 1.5320 sec 2024-12-11T04:26:51,428 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=35, resume processing ppid=34 2024-12-11T04:26:51,428 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, ppid=34, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.5380 sec 2024-12-11T04:26:51,430 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733891211430"}]},"ts":"1733891211430"} 2024-12-11T04:26:51,431 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-11T04:26:51,434 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-11T04:26:51,435 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5550 sec 2024-12-11T04:26:51,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-12-11T04:26:51,988 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: 
default:TestAcidGuarantees, procId: 34 completed 2024-12-11T04:26:51,991 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-11T04:26:51,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=38, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T04:26:51,998 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=38, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T04:26:51,999 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=38, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T04:26:51,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-12-11T04:26:52,001 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4 2024-12-11T04:26:52,005 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A, FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B, FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C, FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/recovered.edits] 2024-12-11T04:26:52,009 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/e5b45c171894447ea9904224777e8247 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/e5b45c171894447ea9904224777e8247 2024-12-11T04:26:52,009 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/5520a0d1eb7443829d8f2527cb427e81 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/5520a0d1eb7443829d8f2527cb427e81 2024-12-11T04:26:52,009 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/f56c5fe0c5e0427882fc07511b0069fa to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/A/f56c5fe0c5e0427882fc07511b0069fa 2024-12-11T04:26:52,012 DEBUG 
[HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/f201f7eec9cc4463a1dd9239163bbd54 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/f201f7eec9cc4463a1dd9239163bbd54 2024-12-11T04:26:52,012 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/b426ba567d67497da879de1f7c43c8ef to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/b426ba567d67497da879de1f7c43c8ef 2024-12-11T04:26:52,012 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/207aa72b11324805afff50610f7b9772 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/B/207aa72b11324805afff50610f7b9772 2024-12-11T04:26:52,015 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/6ce53b2ea98147a0ad112caf0547788f to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/6ce53b2ea98147a0ad112caf0547788f 2024-12-11T04:26:52,016 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/f1efb52157954e429cc2c364b1bc181a to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/f1efb52157954e429cc2c364b1bc181a 2024-12-11T04:26:52,016 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/d1cff00894b24c36900ac2f06dfd27b9 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/C/d1cff00894b24c36900ac2f06dfd27b9 2024-12-11T04:26:52,019 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/recovered.edits/621.seqid to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4/recovered.edits/621.seqid 2024-12-11T04:26:52,019 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(634): Deleted 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c8c23c02526ae28f7a94d562fbd47bb4 2024-12-11T04:26:52,019 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-11T04:26:52,024 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=38, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T04:26:52,029 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-12-11T04:26:52,032 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-11T04:26:52,062 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-11T04:26:52,063 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=38, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T04:26:52,064 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-11T04:26:52,064 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733891212064"}]},"ts":"9223372036854775807"} 2024-12-11T04:26:52,067 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-11T04:26:52,067 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => c8c23c02526ae28f7a94d562fbd47bb4, NAME => 'TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4.', STARTKEY => '', ENDKEY => ''}] 2024-12-11T04:26:52,067 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 
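The DISABLE (pid=34) and DELETE (pid=38) table procedures recorded in this stretch of the log are driven by ordinary client Admin calls; the HMaster line above shows Client=jenkins//172.17.0.2 issuing the delete. The following is a minimal sketch of the equivalent client-side calls using the public HBase Admin API; it is not the TestAcidGuarantees test code itself, only the standard calls that such a DisableTableProcedure/DeleteTableProcedure pair typically serves.

    // Client-side sketch of the DISABLE + DELETE operations recorded in this log.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();   // picks up hbase-site.xml
            TableName table = TableName.valueOf("TestAcidGuarantees");
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                if (admin.tableExists(table)) {
                    if (admin.isTableEnabled(table)) {
                        admin.disableTable(table);               // -> DisableTableProcedure
                    }
                    admin.deleteTable(table);                    // -> DeleteTableProcedure
                }
            }
        }
    }

Each synchronous Admin call waits for the master-side procedure to finish, which is what the repeated MasterRpcServices(1305) "Checking to see if procedure is done" lines and the HBaseAdmin$TableFuture "Operation: DISABLE/DELETE ... completed" lines in this log record.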
2024-12-11T04:26:52,067 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733891212067"}]},"ts":"9223372036854775807"} 2024-12-11T04:26:52,069 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-11T04:26:52,072 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=38, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T04:26:52,074 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 80 msec 2024-12-11T04:26:52,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-12-11T04:26:52,100 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 38 completed 2024-12-11T04:26:52,112 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=246 (was 219) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-905548659_22 at /127.0.0.1:57516 [Waiting for operation #440] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) 
java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x5132e113-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x5132e113-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS:0;5f466b3719ec:39071-shortCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x5132e113-shared-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1459243960_22 at /127.0.0.1:43118 [Waiting for operation #430] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x5132e113-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=454 (was 444) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=312 (was 134) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3737 (was 4255) 2024-12-11T04:26:52,122 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=246, OpenFileDescriptor=454, MaxFileDescriptor=1048576, SystemLoadAverage=312, ProcessCount=11, AvailableMemoryMB=3737 2024-12-11T04:26:52,123 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-11T04:26:52,124 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T04:26:52,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=39, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-11T04:26:52,126 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T04:26:52,126 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:52,126 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 39 2024-12-11T04:26:52,127 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T04:26:52,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-12-11T04:26:52,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741977_1153 (size=963) 2024-12-11T04:26:52,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=39 2024-12-11T04:26:52,342 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-11T04:26:52,345 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35140, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-11T04:26:52,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-12-11T04:26:52,537 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5 2024-12-11T04:26:52,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741978_1154 (size=53) 2024-12-11T04:26:52,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-12-11T04:26:52,943 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T04:26:52,943 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 0d95822809793edddcee8d4c8425775e, disabling compactions & flushes 2024-12-11T04:26:52,943 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:26:52,943 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:26:52,943 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 
after waiting 0 ms 2024-12-11T04:26:52,943 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:26:52,943 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:26:52,943 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 0d95822809793edddcee8d4c8425775e: 2024-12-11T04:26:52,945 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T04:26:52,945 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733891212945"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733891212945"}]},"ts":"1733891212945"} 2024-12-11T04:26:52,946 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-11T04:26:52,947 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T04:26:52,947 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733891212947"}]},"ts":"1733891212947"} 2024-12-11T04:26:52,948 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-11T04:26:52,953 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=0d95822809793edddcee8d4c8425775e, ASSIGN}] 2024-12-11T04:26:52,954 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=0d95822809793edddcee8d4c8425775e, ASSIGN 2024-12-11T04:26:52,954 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=0d95822809793edddcee8d4c8425775e, ASSIGN; state=OFFLINE, location=5f466b3719ec,39071,1733891180267; forceNewPlan=false, retain=false 2024-12-11T04:26:53,105 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=0d95822809793edddcee8d4c8425775e, regionState=OPENING, regionLocation=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:53,107 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=41, ppid=40, state=RUNNABLE; OpenRegionProcedure 0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267}] 2024-12-11T04:26:53,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-12-11T04:26:53,258 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin 
connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:53,262 INFO [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:26:53,262 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(7285): Opening region: {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} 2024-12-11T04:26:53,262 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 0d95822809793edddcee8d4c8425775e 2024-12-11T04:26:53,262 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T04:26:53,263 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(7327): checking encryption for 0d95822809793edddcee8d4c8425775e 2024-12-11T04:26:53,263 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(7330): checking classloading for 0d95822809793edddcee8d4c8425775e 2024-12-11T04:26:53,264 INFO [StoreOpener-0d95822809793edddcee8d4c8425775e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 0d95822809793edddcee8d4c8425775e 2024-12-11T04:26:53,265 INFO [StoreOpener-0d95822809793edddcee8d4c8425775e-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T04:26:53,265 INFO [StoreOpener-0d95822809793edddcee8d4c8425775e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0d95822809793edddcee8d4c8425775e columnFamilyName A 2024-12-11T04:26:53,266 DEBUG [StoreOpener-0d95822809793edddcee8d4c8425775e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:53,266 INFO [StoreOpener-0d95822809793edddcee8d4c8425775e-1 {}] regionserver.HStore(327): Store=0d95822809793edddcee8d4c8425775e/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 
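The CompactionConfiguration values echoed in the store-open entry above (minCompactSize 128 MB, 3 to 10 files per compaction, ratio 1.2, off-peak ratio 5.0, throttle point 2684354560, major period 604800000 ms, jitter 0.5) are the stock defaults rather than anything this test sets. A minimal sketch of the standard configuration keys behind those numbers, assuming a vanilla HBase 2.x setup; the values below simply restate what the log reports.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch: standard HBase configuration keys that feed the CompactionConfiguration
// values printed above. These are the defaults the log is echoing, shown here
// only to make the mapping explicit.
public class CompactionConfigSketch {
  public static Configuration defaults() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize: 128 MB
    conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);         // off-peak ratio
    conf.setLong("hbase.regionserver.thread.compaction.throttle", 2684354560L); // throttle point
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);            // major period (7 days in ms)
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);          // major jitter
    return conf;
  }
}
```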
2024-12-11T04:26:53,266 INFO [StoreOpener-0d95822809793edddcee8d4c8425775e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 0d95822809793edddcee8d4c8425775e 2024-12-11T04:26:53,267 INFO [StoreOpener-0d95822809793edddcee8d4c8425775e-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T04:26:53,268 INFO [StoreOpener-0d95822809793edddcee8d4c8425775e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0d95822809793edddcee8d4c8425775e columnFamilyName B 2024-12-11T04:26:53,268 DEBUG [StoreOpener-0d95822809793edddcee8d4c8425775e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:53,268 INFO [StoreOpener-0d95822809793edddcee8d4c8425775e-1 {}] regionserver.HStore(327): Store=0d95822809793edddcee8d4c8425775e/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T04:26:53,269 INFO [StoreOpener-0d95822809793edddcee8d4c8425775e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 0d95822809793edddcee8d4c8425775e 2024-12-11T04:26:53,270 INFO [StoreOpener-0d95822809793edddcee8d4c8425775e-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T04:26:53,270 INFO [StoreOpener-0d95822809793edddcee8d4c8425775e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0d95822809793edddcee8d4c8425775e columnFamilyName C 2024-12-11T04:26:53,270 DEBUG [StoreOpener-0d95822809793edddcee8d4c8425775e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:53,270 INFO [StoreOpener-0d95822809793edddcee8d4c8425775e-1 {}] regionserver.HStore(327): Store=0d95822809793edddcee8d4c8425775e/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T04:26:53,270 INFO [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:26:53,271 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e 2024-12-11T04:26:53,271 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e 2024-12-11T04:26:53,273 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-11T04:26:53,274 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(1085): writing seq id for 0d95822809793edddcee8d4c8425775e 2024-12-11T04:26:53,276 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T04:26:53,276 INFO [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(1102): Opened 0d95822809793edddcee8d4c8425775e; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66656784, jitterRate=-0.0067365169525146484}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-11T04:26:53,277 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegion(1001): Region open journal for 0d95822809793edddcee8d4c8425775e: 2024-12-11T04:26:53,278 INFO [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e., pid=41, masterSystemTime=1733891213258 2024-12-11T04:26:53,279 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:26:53,279 INFO [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=41}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 
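The create request logged by HMaster above amounts to a table with three column families (A, B, C), a single version per cell, and the table-level attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', which is why each store opens with a CompactingMemStore using the ADAPTIVE compactor; the remaining family attributes in the descriptor (BLOOMFILTER => 'ROW', BLOCKSIZE => 65536, and so on) are defaults. A minimal client-side sketch of an equivalent request, assuming the standard HBase 2.x Admin API; the test drives this through its own helpers, so the class and variable names here are illustrative only.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: recreate the table descriptor reported by the CreateTableProcedure
// above: families A, B, C with VERSIONS => 1 and the table attribute selecting
// the ADAPTIVE in-memory compaction policy.
public class CreateAcidTableSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
              // table attribute seen in the log entry above
              .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
      for (String family : new String[] {"A", "B", "C"}) {
        table.setColumnFamily(
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1) // VERSIONS => '1' in the descriptor above
                .build());
      }
      admin.createTable(table.build());
    }
  }
}
```

Admin.createTable blocks until the CreateTableProcedure reaches SUCCESS, which matches the "procId: 39 completed" entry that follows in the log.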
2024-12-11T04:26:53,280 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=0d95822809793edddcee8d4c8425775e, regionState=OPEN, openSeqNum=2, regionLocation=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:53,283 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=41, resume processing ppid=40 2024-12-11T04:26:53,283 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, ppid=40, state=SUCCESS; OpenRegionProcedure 0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 in 174 msec 2024-12-11T04:26:53,285 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=40, resume processing ppid=39 2024-12-11T04:26:53,285 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, ppid=39, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=0d95822809793edddcee8d4c8425775e, ASSIGN in 330 msec 2024-12-11T04:26:53,285 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T04:26:53,286 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733891213286"}]},"ts":"1733891213286"} 2024-12-11T04:26:53,287 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-11T04:26:53,290 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=39, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T04:26:53,292 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1660 sec 2024-12-11T04:26:54,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-12-11T04:26:54,233 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 39 completed 2024-12-11T04:26:54,235 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7cca453a to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@350b322d 2024-12-11T04:26:54,239 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26401a5f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:26:54,241 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:26:54,243 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35412, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:26:54,244 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-11T04:26:54,246 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36112, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-11T04:26:54,251 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-11T04:26:54,251 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T04:26:54,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=42, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-11T04:26:54,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741979_1155 (size=999) 2024-12-11T04:26:54,671 DEBUG [PEWorker-3 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-11T04:26:54,671 INFO [PEWorker-3 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-11T04:26:54,674 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=43, ppid=42, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-11T04:26:54,682 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=43, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=0d95822809793edddcee8d4c8425775e, REOPEN/MOVE}] 2024-12-11T04:26:54,683 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=44, ppid=43, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=0d95822809793edddcee8d4c8425775e, REOPEN/MOVE 2024-12-11T04:26:54,683 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=44 updating hbase:meta row=0d95822809793edddcee8d4c8425775e, regionState=CLOSING, regionLocation=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:54,684 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T04:26:54,684 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=45, ppid=44, state=RUNNABLE; CloseRegionProcedure 0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267}] 2024-12-11T04:26:54,836 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:54,836 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] handler.UnassignRegionHandler(124): Close 0d95822809793edddcee8d4c8425775e 2024-12-11T04:26:54,836 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-11T04:26:54,837 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1681): Closing 0d95822809793edddcee8d4c8425775e, disabling compactions & flushes 2024-12-11T04:26:54,837 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:26:54,837 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:26:54,837 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. after waiting 0 ms 2024-12-11T04:26:54,837 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 
2024-12-11T04:26:54,841 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-11T04:26:54,842 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:26:54,842 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegion(1635): Region close journal for 0d95822809793edddcee8d4c8425775e: 2024-12-11T04:26:54,842 WARN [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] regionserver.HRegionServer(3786): Not adding moved region record: 0d95822809793edddcee8d4c8425775e to self. 2024-12-11T04:26:54,843 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=45}] handler.UnassignRegionHandler(170): Closed 0d95822809793edddcee8d4c8425775e 2024-12-11T04:26:54,844 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=44 updating hbase:meta row=0d95822809793edddcee8d4c8425775e, regionState=CLOSED 2024-12-11T04:26:54,846 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=45, resume processing ppid=44 2024-12-11T04:26:54,846 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, ppid=44, state=SUCCESS; CloseRegionProcedure 0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 in 161 msec 2024-12-11T04:26:54,846 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=44, ppid=43, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=0d95822809793edddcee8d4c8425775e, REOPEN/MOVE; state=CLOSED, location=5f466b3719ec,39071,1733891180267; forceNewPlan=false, retain=true 2024-12-11T04:26:54,997 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=44 updating hbase:meta row=0d95822809793edddcee8d4c8425775e, regionState=OPENING, regionLocation=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:54,998 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=46, ppid=44, state=RUNNABLE; OpenRegionProcedure 0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267}] 2024-12-11T04:26:55,150 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:55,153 INFO [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 
2024-12-11T04:26:55,153 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(7285): Opening region: {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} 2024-12-11T04:26:55,153 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 0d95822809793edddcee8d4c8425775e 2024-12-11T04:26:55,154 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T04:26:55,154 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(7327): checking encryption for 0d95822809793edddcee8d4c8425775e 2024-12-11T04:26:55,154 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(7330): checking classloading for 0d95822809793edddcee8d4c8425775e 2024-12-11T04:26:55,156 INFO [StoreOpener-0d95822809793edddcee8d4c8425775e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 0d95822809793edddcee8d4c8425775e 2024-12-11T04:26:55,157 INFO [StoreOpener-0d95822809793edddcee8d4c8425775e-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T04:26:55,162 INFO [StoreOpener-0d95822809793edddcee8d4c8425775e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0d95822809793edddcee8d4c8425775e columnFamilyName A 2024-12-11T04:26:55,164 DEBUG [StoreOpener-0d95822809793edddcee8d4c8425775e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:55,164 INFO [StoreOpener-0d95822809793edddcee8d4c8425775e-1 {}] regionserver.HStore(327): Store=0d95822809793edddcee8d4c8425775e/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T04:26:55,165 INFO [StoreOpener-0d95822809793edddcee8d4c8425775e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 0d95822809793edddcee8d4c8425775e 2024-12-11T04:26:55,165 INFO [StoreOpener-0d95822809793edddcee8d4c8425775e-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T04:26:55,166 INFO [StoreOpener-0d95822809793edddcee8d4c8425775e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0d95822809793edddcee8d4c8425775e columnFamilyName B 2024-12-11T04:26:55,166 DEBUG [StoreOpener-0d95822809793edddcee8d4c8425775e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:55,166 INFO [StoreOpener-0d95822809793edddcee8d4c8425775e-1 {}] regionserver.HStore(327): Store=0d95822809793edddcee8d4c8425775e/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T04:26:55,166 INFO [StoreOpener-0d95822809793edddcee8d4c8425775e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 0d95822809793edddcee8d4c8425775e 2024-12-11T04:26:55,167 INFO [StoreOpener-0d95822809793edddcee8d4c8425775e-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T04:26:55,167 INFO [StoreOpener-0d95822809793edddcee8d4c8425775e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0d95822809793edddcee8d4c8425775e columnFamilyName C 2024-12-11T04:26:55,167 DEBUG [StoreOpener-0d95822809793edddcee8d4c8425775e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:55,168 INFO [StoreOpener-0d95822809793edddcee8d4c8425775e-1 {}] regionserver.HStore(327): Store=0d95822809793edddcee8d4c8425775e/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T04:26:55,168 INFO [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:26:55,169 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e 2024-12-11T04:26:55,170 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e 2024-12-11T04:26:55,171 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-11T04:26:55,172 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(1085): writing seq id for 0d95822809793edddcee8d4c8425775e 2024-12-11T04:26:55,173 INFO [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(1102): Opened 0d95822809793edddcee8d4c8425775e; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75460437, jitterRate=0.12444813549518585}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-11T04:26:55,176 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegion(1001): Region open journal for 0d95822809793edddcee8d4c8425775e: 2024-12-11T04:26:55,177 INFO [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e., pid=46, masterSystemTime=1733891215150 2024-12-11T04:26:55,179 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:26:55,179 INFO [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=46}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 
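The modify request logged at 04:26:54,251 changes only family A, marking it as a MOB family with a MOB threshold of 4 bytes (IS_MOB => 'true', MOB_THRESHOLD => '4'); that schema change is what drives the ReopenTableRegionsProcedure and the REOPEN/MOVE of region 0d95822809793edddcee8d4c8425775e traced above. A minimal sketch of an equivalent client-side change, again assuming the standard HBase 2.x Admin API rather than the test's own utilities.

```java
import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: the schema change applied by the ModifyTableProcedure above.
// Family 'A' becomes a MOB family with MOB_THRESHOLD => 4, forcing the
// region reopen seen in the surrounding log entries.
public class EnableMobOnFamilyASketch {
  public static void main(String[] args) throws IOException {
    TableName name = TableName.valueOf("TestAcidGuarantees");
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = connection.getAdmin()) {
      TableDescriptor current = admin.getDescriptor(name);
      TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
          .modifyColumnFamily(
              ColumnFamilyDescriptorBuilder
                  .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
                  .setMobEnabled(true)  // IS_MOB => 'true'
                  .setMobThreshold(4L)  // MOB_THRESHOLD => '4'
                  .build())
          .build();
      admin.modifyTable(modified); // triggers ReopenTableRegionsProcedure
    }
  }
}
```

On the repeated TableDescriptorChecker warning: a MEMSTORE_FLUSHSIZE of 131072 bytes combined with the stock hbase.hregion.memstore.block.multiplier of 4 yields the 512.0 K memstore limit that later surfaces as RegionTooBusyException once the flush workload starts.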
2024-12-11T04:26:55,179 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=44 updating hbase:meta row=0d95822809793edddcee8d4c8425775e, regionState=OPEN, openSeqNum=5, regionLocation=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:55,182 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=46, resume processing ppid=44 2024-12-11T04:26:55,182 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, ppid=44, state=SUCCESS; OpenRegionProcedure 0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 in 183 msec 2024-12-11T04:26:55,183 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=43 2024-12-11T04:26:55,184 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=43, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=0d95822809793edddcee8d4c8425775e, REOPEN/MOVE in 500 msec 2024-12-11T04:26:55,186 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=43, resume processing ppid=42 2024-12-11T04:26:55,186 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, ppid=42, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 512 msec 2024-12-11T04:26:55,190 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 934 msec 2024-12-11T04:26:55,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=42 2024-12-11T04:26:55,200 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x433e2b26 to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7bad2e85 2024-12-11T04:26:55,206 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@491ea2ee, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:26:55,207 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x176c5c1b to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@328f994d 2024-12-11T04:26:55,211 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b44b1e5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:26:55,212 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x24f64590 to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@19a533a3 2024-12-11T04:26:55,215 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42e904d8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:26:55,216 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7c5c4716 to 
127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@465dc764 2024-12-11T04:26:55,219 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2885d2d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:26:55,220 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x68f0be85 to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@247c0c93 2024-12-11T04:26:55,223 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22e911df, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:26:55,224 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x517ff977 to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3b727d6e 2024-12-11T04:26:55,227 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14c16cd4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:26:55,227 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3448d233 to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1c7940d9 2024-12-11T04:26:55,230 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@341384e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:26:55,231 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7a11164b to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2c38ee58 2024-12-11T04:26:55,234 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26b120d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:26:55,235 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x08a7e1dd to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@630684bf 2024-12-11T04:26:55,238 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c1ec7ee, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:26:55,241 DEBUG 
[hconnection-0x57c53fe4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:26:55,242 DEBUG [hconnection-0x553fba0b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:26:55,242 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:26:55,242 DEBUG [hconnection-0x3627e90e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:26:55,243 DEBUG [hconnection-0x623f48b6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:26:55,243 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35418, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:26:55,244 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35444, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:26:55,244 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35450, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:26:55,244 DEBUG [hconnection-0x6ebb124e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:26:55,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees 2024-12-11T04:26:55,244 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35428, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:26:55,245 DEBUG [hconnection-0x420eadaa-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:26:55,245 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35454, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:26:55,246 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:26:55,246 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35456, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:26:55,246 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:26:55,246 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:26:55,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-11T04:26:55,250 DEBUG [hconnection-0x3f96faad-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:26:55,250 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35466, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:26:55,255 DEBUG [hconnection-0x78b8fca0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:26:55,255 DEBUG [hconnection-0x13563c7b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:26:55,256 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35484, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:26:55,260 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35470, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:26:55,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 0d95822809793edddcee8d4c8425775e 2024-12-11T04:26:55,263 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0d95822809793edddcee8d4c8425775e 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-11T04:26:55,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=A 2024-12-11T04:26:55,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:55,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=B 2024-12-11T04:26:55,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:55,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=C 2024-12-11T04:26:55,266 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:55,320 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121124ac81f825734ad18b06f760957dc8b4_0d95822809793edddcee8d4c8425775e is 50, key is test_row_1/A:col10/1733891215260/Put/seqid=0 2024-12-11T04:26:55,327 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:55,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35454 deadline: 1733891275318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:55,331 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:55,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35428 deadline: 1733891275325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:55,332 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:55,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35444 deadline: 1733891275326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:55,333 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:55,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891275327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:55,334 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:55,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891275327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:55,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-11T04:26:55,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741980_1156 (size=9714) 2024-12-11T04:26:55,368 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:55,381 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121124ac81f825734ad18b06f760957dc8b4_0d95822809793edddcee8d4c8425775e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121124ac81f825734ad18b06f760957dc8b4_0d95822809793edddcee8d4c8425775e 2024-12-11T04:26:55,382 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/41d1e8a514224734baf0c8eddd03d60b, store: [table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:26:55,392 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/41d1e8a514224734baf0c8eddd03d60b is 175, key is 
test_row_1/A:col10/1733891215260/Put/seqid=0 2024-12-11T04:26:55,398 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:55,399 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-11T04:26:55,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:26:55,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. as already flushing 2024-12-11T04:26:55,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:26:55,399 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:55,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:26:55,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:55,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741981_1157 (size=22361) 2024-12-11T04:26:55,423 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=16, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/41d1e8a514224734baf0c8eddd03d60b 2024-12-11T04:26:55,438 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:55,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35454 deadline: 1733891275430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:55,439 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:55,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35428 deadline: 1733891275433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:55,442 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:55,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891275435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:55,443 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:55,443 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:55,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35444 deadline: 1733891275436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:55,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891275436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:55,455 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/5a701796440c407e9b0190a20662fd73 is 50, key is test_row_1/B:col10/1733891215260/Put/seqid=0 2024-12-11T04:26:55,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741982_1158 (size=9657) 2024-12-11T04:26:55,491 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/5a701796440c407e9b0190a20662fd73 2024-12-11T04:26:55,541 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/a1522fa48c484388aeae940e234e3686 is 50, key is test_row_1/C:col10/1733891215260/Put/seqid=0 2024-12-11T04:26:55,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-11T04:26:55,552 
DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:55,553 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-11T04:26:55,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:26:55,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. as already flushing 2024-12-11T04:26:55,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:26:55,553 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:55,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:26:55,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:55,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741983_1159 (size=9657) 2024-12-11T04:26:55,641 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:55,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35454 deadline: 1733891275640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:55,642 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:55,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35428 deadline: 1733891275640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:55,644 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:55,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891275644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:55,646 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:55,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35444 deadline: 1733891275645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:55,649 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:55,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891275648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:55,706 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:55,706 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-11T04:26:55,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:26:55,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. as already flushing 2024-12-11T04:26:55,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:26:55,707 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:55,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:55,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:55,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-11T04:26:55,860 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:55,861 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-11T04:26:55,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:26:55,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. as already flushing 2024-12-11T04:26:55,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:26:55,861 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:55,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:55,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:55,944 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:55,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35454 deadline: 1733891275944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:55,945 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:55,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35428 deadline: 1733891275945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:55,950 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:55,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891275948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:55,950 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:55,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35444 deadline: 1733891275950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:55,953 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:55,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891275952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:55,968 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/a1522fa48c484388aeae940e234e3686 2024-12-11T04:26:55,976 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/41d1e8a514224734baf0c8eddd03d60b as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/41d1e8a514224734baf0c8eddd03d60b 2024-12-11T04:26:55,981 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/41d1e8a514224734baf0c8eddd03d60b, entries=100, sequenceid=16, filesize=21.8 K 2024-12-11T04:26:55,982 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/5a701796440c407e9b0190a20662fd73 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/5a701796440c407e9b0190a20662fd73 2024-12-11T04:26:55,989 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/5a701796440c407e9b0190a20662fd73, entries=100, sequenceid=16, filesize=9.4 K 2024-12-11T04:26:55,990 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/a1522fa48c484388aeae940e234e3686 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/a1522fa48c484388aeae940e234e3686 2024-12-11T04:26:55,995 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/a1522fa48c484388aeae940e234e3686, entries=100, sequenceid=16, filesize=9.4 K 2024-12-11T04:26:55,997 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=154.31 KB/158010 for 0d95822809793edddcee8d4c8425775e in 734ms, sequenceid=16, compaction requested=false 2024-12-11T04:26:55,997 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-11T04:26:56,005 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0d95822809793edddcee8d4c8425775e: 2024-12-11T04:26:56,014 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:56,014 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-11T04:26:56,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 
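The repeated RegionTooBusyException entries above show client Mutate calls being rejected while the region's memstore sits over its 512.0 K blocking limit and the in-flight flush is still draining it. The Java sketch below is only an illustration of how a writer could back off and retry against that condition: the table, row, family and qualifier names are taken from the log, while the BackoffWriter class, the retry loop and the backoff constants are hypothetical, and the stock HBase client normally applies its own internal retry/backoff (and may wrap this exception in its own retry exceptions) before anything reaches application code.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Same shape of write as the rejected Mutate calls in the log:
      // one cell in family A, qualifier col10, on test_row_0.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoffMs = 100;  // hypothetical starting backoff
      for (int attempt = 0; attempt < 10; attempt++) {
        try {
          table.put(put);    // rejected with "Over memstore limit" while the region is blocking writes
          return;            // write accepted
        } catch (RegionTooBusyException e) {
          // The server is asking callers to slow down until the flush shrinks the
          // memstore; wait and try again with a growing delay.
          Thread.sleep(backoffMs);
          backoffMs = Math.min(backoffMs * 2, 5_000);
        }
      }
      throw new IOException("region stayed too busy after retries");
    }
  }
}

Exponential backoff mirrors what the server is signalling here: the blocking limit exists so that writers pause long enough for MemStoreFlusher to catch up, which is exactly what the subsequent "Finished flush" entries show happening.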
2024-12-11T04:26:56,015 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2837): Flushing 0d95822809793edddcee8d4c8425775e 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-11T04:26:56,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=A 2024-12-11T04:26:56,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:56,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=B 2024-12-11T04:26:56,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:56,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=C 2024-12-11T04:26:56,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:56,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211ca6d2946b9d14c56990dcdb5390348d2_0d95822809793edddcee8d4c8425775e is 50, key is test_row_0/A:col10/1733891215307/Put/seqid=0 2024-12-11T04:26:56,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741984_1160 (size=12154) 2024-12-11T04:26:56,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:56,073 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211ca6d2946b9d14c56990dcdb5390348d2_0d95822809793edddcee8d4c8425775e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211ca6d2946b9d14c56990dcdb5390348d2_0d95822809793edddcee8d4c8425775e 2024-12-11T04:26:56,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/b8f776a81ef04fdd9cba2406e3b5679b, store: [table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:26:56,083 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/b8f776a81ef04fdd9cba2406e3b5679b is 175, key is test_row_0/A:col10/1733891215307/Put/seqid=0 2024-12-11T04:26:56,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741985_1161 (size=30955) 2024-12-11T04:26:56,113 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=42, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/b8f776a81ef04fdd9cba2406e3b5679b 2024-12-11T04:26:56,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/32e6d222382b4effadc89afdde7c0a3f is 50, key is test_row_0/B:col10/1733891215307/Put/seqid=0 2024-12-11T04:26:56,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741986_1162 (size=12001) 2024-12-11T04:26:56,182 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/32e6d222382b4effadc89afdde7c0a3f 2024-12-11T04:26:56,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/078a1c850af34b85b5752a6a3ac4adee is 50, key is test_row_0/C:col10/1733891215307/Put/seqid=0 2024-12-11T04:26:56,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741987_1163 (size=12001) 2024-12-11T04:26:56,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-11T04:26:56,449 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. as already flushing 2024-12-11T04:26:56,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 0d95822809793edddcee8d4c8425775e 2024-12-11T04:26:56,473 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:56,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891276463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:56,474 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:56,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35428 deadline: 1733891276464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:56,474 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:56,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35454 deadline: 1733891276469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:56,474 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:56,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891276469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:56,475 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:56,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35444 deadline: 1733891276469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:56,575 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:56,575 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:56,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891276575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:56,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35428 deadline: 1733891276575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:56,578 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:56,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891276576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:56,578 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:56,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35454 deadline: 1733891276576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:56,579 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:56,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35444 deadline: 1733891276577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:56,651 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/078a1c850af34b85b5752a6a3ac4adee 2024-12-11T04:26:56,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/b8f776a81ef04fdd9cba2406e3b5679b as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/b8f776a81ef04fdd9cba2406e3b5679b 2024-12-11T04:26:56,670 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/b8f776a81ef04fdd9cba2406e3b5679b, entries=150, sequenceid=42, filesize=30.2 K 2024-12-11T04:26:56,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/32e6d222382b4effadc89afdde7c0a3f as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/32e6d222382b4effadc89afdde7c0a3f 2024-12-11T04:26:56,682 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/32e6d222382b4effadc89afdde7c0a3f, entries=150, sequenceid=42, filesize=11.7 K 2024-12-11T04:26:56,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/078a1c850af34b85b5752a6a3ac4adee as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/078a1c850af34b85b5752a6a3ac4adee 2024-12-11T04:26:56,694 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/078a1c850af34b85b5752a6a3ac4adee, entries=150, sequenceid=42, filesize=11.7 K 2024-12-11T04:26:56,697 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=60.38 KB/61830 for 0d95822809793edddcee8d4c8425775e in 682ms, sequenceid=42, compaction requested=false 2024-12-11T04:26:56,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2538): Flush status journal for 0d95822809793edddcee8d4c8425775e: 2024-12-11T04:26:56,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:26:56,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=48 2024-12-11T04:26:56,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=48 2024-12-11T04:26:56,700 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=47 2024-12-11T04:26:56,701 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4530 sec 2024-12-11T04:26:56,702 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees in 1.4590 sec 2024-12-11T04:26:56,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 0d95822809793edddcee8d4c8425775e 2024-12-11T04:26:56,785 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0d95822809793edddcee8d4c8425775e 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-11T04:26:56,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=A 2024-12-11T04:26:56,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:56,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=B 2024-12-11T04:26:56,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:56,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
0d95822809793edddcee8d4c8425775e, store=C 2024-12-11T04:26:56,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:56,805 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211436ab78aed0a413e92a2419149231212_0d95822809793edddcee8d4c8425775e is 50, key is test_row_0/A:col10/1733891216785/Put/seqid=0 2024-12-11T04:26:56,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741988_1164 (size=17034) 2024-12-11T04:26:56,825 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:56,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891276817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:56,826 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:56,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35428 deadline: 1733891276817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:56,832 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:56,837 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211436ab78aed0a413e92a2419149231212_0d95822809793edddcee8d4c8425775e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211436ab78aed0a413e92a2419149231212_0d95822809793edddcee8d4c8425775e 2024-12-11T04:26:56,838 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:56,838 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/4f6c9becba1c4f4aaf4a6b5f6637c684, store: [table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:26:56,838 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:56,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891276825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:56,839 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:56,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35454 deadline: 1733891276826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:56,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35444 deadline: 1733891276826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:56,839 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/4f6c9becba1c4f4aaf4a6b5f6637c684 is 175, key is test_row_0/A:col10/1733891216785/Put/seqid=0 2024-12-11T04:26:56,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741989_1165 (size=48139) 2024-12-11T04:26:56,864 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=59, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/4f6c9becba1c4f4aaf4a6b5f6637c684 2024-12-11T04:26:56,874 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/f589fe2180934ebfba3de339f165cb78 is 50, key is test_row_0/B:col10/1733891216785/Put/seqid=0 2024-12-11T04:26:56,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741990_1166 (size=12001) 2024-12-11T04:26:56,894 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/f589fe2180934ebfba3de339f165cb78 
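The repeated WARN/DEBUG pairs through this stretch of the log are the region server rejecting Mutate calls with RegionTooBusyException because region 0d95822809793edddcee8d4c8425775e is over its 512.0 K memstore limit while MemStoreFlusher.0 is still writing the A/B/C store files out. A minimal client-side sketch of how a writer can react to that rejection follows; the table, row and column names are copied from the log, while the retry bound and backoff values are illustrative assumptions and not part of the test (with the usual hbase.client.retries.number / hbase.client.pause settings the exception may also arrive wrapped in a retries-exhausted error rather than directly).

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffWriter {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        final int maxAttempts = 5;   // assumption: illustrative bound, not from the test
        long backoffMs = 100;        // assumption: illustrative starting backoff
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          for (int attempt = 1; attempt <= maxAttempts; attempt++) {
            try {
              // Rejected while the region is over its memstore limit, as in the WARNs above.
              table.put(put);
              break;
            } catch (IOException busy) {
              // Same condition as the "Over memstore limit=512.0 K" entries:
              // back off and let the flush catch up before retrying.
              Thread.sleep(backoffMs);
              backoffMs *= 2;
            }
          }
        }
      }
    }

In the log the same effect shows up as the climbing callId/deadline values on each connection: a rejected mutate is simply retried a little later, once the flush has freed memstore space.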
2024-12-11T04:26:56,905 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/5f6fa41e8d93487ba38ce9b3ceafb846 is 50, key is test_row_0/C:col10/1733891216785/Put/seqid=0 2024-12-11T04:26:56,928 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:56,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35428 deadline: 1733891276928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:56,931 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:56,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891276928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:56,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741991_1167 (size=12001) 2024-12-11T04:26:56,941 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:56,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35454 deadline: 1733891276941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:56,942 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:56,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891276941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:56,942 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:56,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35444 deadline: 1733891276941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:56,984 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-11T04:26:57,131 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:57,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35428 deadline: 1733891277130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:57,135 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:57,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891277134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:57,145 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:57,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35454 deadline: 1733891277143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:57,146 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:57,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891277144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:57,147 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:57,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35444 deadline: 1733891277147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:57,337 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/5f6fa41e8d93487ba38ce9b3ceafb846 2024-12-11T04:26:57,343 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/4f6c9becba1c4f4aaf4a6b5f6637c684 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/4f6c9becba1c4f4aaf4a6b5f6637c684 2024-12-11T04:26:57,349 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/4f6c9becba1c4f4aaf4a6b5f6637c684, entries=250, sequenceid=59, filesize=47.0 K 2024-12-11T04:26:57,350 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/f589fe2180934ebfba3de339f165cb78 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/f589fe2180934ebfba3de339f165cb78 2024-12-11T04:26:57,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-11T04:26:57,356 INFO [Thread-752 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 47 completed 2024-12-11T04:26:57,357 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/f589fe2180934ebfba3de339f165cb78, entries=150, sequenceid=59, filesize=11.7 K 2024-12-11T04:26:57,357 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:26:57,359 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/5f6fa41e8d93487ba38ce9b3ceafb846 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/5f6fa41e8d93487ba38ce9b3ceafb846 2024-12-11T04:26:57,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees 2024-12-11T04:26:57,360 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:26:57,361 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:26:57,361 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:26:57,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-11T04:26:57,366 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/5f6fa41e8d93487ba38ce9b3ceafb846, entries=150, sequenceid=59, filesize=11.7 K 2024-12-11T04:26:57,367 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for 0d95822809793edddcee8d4c8425775e in 582ms, sequenceid=59, compaction requested=true 2024-12-11T04:26:57,367 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0d95822809793edddcee8d4c8425775e: 2024-12-11T04:26:57,368 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0d95822809793edddcee8d4c8425775e:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:26:57,368 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:57,368 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:26:57,368 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:26:57,368 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0d95822809793edddcee8d4c8425775e:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:26:57,368 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 
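The procId entries just above come from the test driver flushing the table through the Admin API while writes are still in flight: HBaseAdmin$TableFuture reports procId 47 complete, and the master immediately stores FlushTableProcedure pid=49 for the next request. A hedged sketch of that call path, assuming default client configuration (only the table name is taken from the log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Requests a flush of every region of the table; in this log that shows up as a
          // master-side FlushTableProcedure (procId 47, then 49) that the caller waits on.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }

Issuing another flush while the region is still flushing is also what produces the pid=50 "Unable to complete flush ... as already flushing" failure further down.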
2024-12-11T04:26:57,368 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0d95822809793edddcee8d4c8425775e:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:26:57,368 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:26:57,370 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101455 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:26:57,370 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): 0d95822809793edddcee8d4c8425775e/A is initiating minor compaction (all files) 2024-12-11T04:26:57,370 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0d95822809793edddcee8d4c8425775e/A in TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:26:57,370 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/41d1e8a514224734baf0c8eddd03d60b, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/b8f776a81ef04fdd9cba2406e3b5679b, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/4f6c9becba1c4f4aaf4a6b5f6637c684] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp, totalSize=99.1 K 2024-12-11T04:26:57,370 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:26:57,370 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 
files: [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/41d1e8a514224734baf0c8eddd03d60b, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/b8f776a81ef04fdd9cba2406e3b5679b, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/4f6c9becba1c4f4aaf4a6b5f6637c684] 2024-12-11T04:26:57,371 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 41d1e8a514224734baf0c8eddd03d60b, keycount=100, bloomtype=ROW, size=21.8 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733891215256 2024-12-11T04:26:57,371 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:26:57,371 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): 0d95822809793edddcee8d4c8425775e/B is initiating minor compaction (all files) 2024-12-11T04:26:57,371 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0d95822809793edddcee8d4c8425775e/B in TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:26:57,371 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/5a701796440c407e9b0190a20662fd73, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/32e6d222382b4effadc89afdde7c0a3f, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/f589fe2180934ebfba3de339f165cb78] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp, totalSize=32.9 K 2024-12-11T04:26:57,372 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 5a701796440c407e9b0190a20662fd73, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733891215256 2024-12-11T04:26:57,372 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting b8f776a81ef04fdd9cba2406e3b5679b, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1733891215307 2024-12-11T04:26:57,373 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 32e6d222382b4effadc89afdde7c0a3f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1733891215307 2024-12-11T04:26:57,373 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4f6c9becba1c4f4aaf4a6b5f6637c684, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=59, earliestPutTs=1733891216463 2024-12-11T04:26:57,374 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 
f589fe2180934ebfba3de339f165cb78, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=59, earliestPutTs=1733891216463 2024-12-11T04:26:57,397 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:26:57,398 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0d95822809793edddcee8d4c8425775e#B#compaction#147 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:26:57,399 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/05315822d1914dd6b2847d834ff6891d is 50, key is test_row_0/B:col10/1733891216785/Put/seqid=0 2024-12-11T04:26:57,400 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412115b516272c2794746b1e73f12f8fad9b8_0d95822809793edddcee8d4c8425775e store=[table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:26:57,410 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412115b516272c2794746b1e73f12f8fad9b8_0d95822809793edddcee8d4c8425775e, store=[table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:26:57,410 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412115b516272c2794746b1e73f12f8fad9b8_0d95822809793edddcee8d4c8425775e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:26:57,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 0d95822809793edddcee8d4c8425775e 2024-12-11T04:26:57,443 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0d95822809793edddcee8d4c8425775e 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-11T04:26:57,444 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=A 2024-12-11T04:26:57,444 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:57,444 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=B 2024-12-11T04:26:57,444 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:57,444 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=C 2024-12-11T04:26:57,444 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:57,463 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-11T04:26:57,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741992_1168 (size=12104) 2024-12-11T04:26:57,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741993_1169 (size=4469) 2024-12-11T04:26:57,472 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0d95822809793edddcee8d4c8425775e#A#compaction#148 average throughput is 0.33 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:26:57,473 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:57,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891277466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:57,473 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:57,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35454 deadline: 1733891277470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:57,474 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/efd67f94c2d64d3a9721e28395bc75e7 is 175, key is test_row_0/A:col10/1733891216785/Put/seqid=0 2024-12-11T04:26:57,477 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:57,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35444 deadline: 1733891277471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:57,478 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:57,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35428 deadline: 1733891277473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:57,481 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:57,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891277473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:57,491 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121117bd3d8a0e174d80b3aebd815a7bd8e3_0d95822809793edddcee8d4c8425775e is 50, key is test_row_0/A:col10/1733891217443/Put/seqid=0 2024-12-11T04:26:57,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741994_1170 (size=31058) 2024-12-11T04:26:57,513 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:57,515 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-11T04:26:57,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:26:57,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. as already flushing 2024-12-11T04:26:57,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:26:57,515 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:57,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:57,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:57,518 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/efd67f94c2d64d3a9721e28395bc75e7 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/efd67f94c2d64d3a9721e28395bc75e7 2024-12-11T04:26:57,527 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0d95822809793edddcee8d4c8425775e/A of 0d95822809793edddcee8d4c8425775e into efd67f94c2d64d3a9721e28395bc75e7(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:26:57,527 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0d95822809793edddcee8d4c8425775e: 2024-12-11T04:26:57,528 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e., storeName=0d95822809793edddcee8d4c8425775e/A, priority=13, startTime=1733891217368; duration=0sec 2024-12-11T04:26:57,528 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:26:57,528 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0d95822809793edddcee8d4c8425775e:A 2024-12-11T04:26:57,528 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:26:57,530 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:26:57,530 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): 0d95822809793edddcee8d4c8425775e/C is initiating minor compaction (all files) 2024-12-11T04:26:57,530 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0d95822809793edddcee8d4c8425775e/C in TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 
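The "Exploring compaction algorithm has selected 3 files of size 33659 ... with 1 in ratio" line above (and the earlier 101455-byte selection for store A) comes from a ratio-based candidate check. The snippet below is a simplified, hedged model of that check only, assuming the default hbase.hstore.compaction.ratio of 1.2; the real ExploringCompactionPolicy additionally enforces minimum/maximum file counts, total-size limits and off-peak ratios.

    import java.util.List;

    public class CompactionRatioCheck {
      // Simplified model of the "in ratio" test applied to a candidate window of store files.
      static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
          // Each file must be no larger than ratio * (sum of the other files in the window).
          if (size > ratio * (total - size)) {
            return false;
          }
        }
        return true;
      }

      public static void main(String[] args) {
        // Approximations of the logged 9.4 K / 11.7 K / 11.7 K files, chosen to sum to 33659 bytes.
        List<Long> window = List.of(9657L, 12001L, 12001L);
        System.out.println(filesInRatio(window, 1.2)); // prints true: the 3-file window qualifies
      }
    }

With the three store files from the log, every file is smaller than 1.2 times the sum of the other two, so the whole window is "in ratio" and all three files are compacted together, matching the selection reported above.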
2024-12-11T04:26:57,530 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/a1522fa48c484388aeae940e234e3686, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/078a1c850af34b85b5752a6a3ac4adee, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/5f6fa41e8d93487ba38ce9b3ceafb846] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp, totalSize=32.9 K 2024-12-11T04:26:57,532 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting a1522fa48c484388aeae940e234e3686, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733891215256 2024-12-11T04:26:57,533 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 078a1c850af34b85b5752a6a3ac4adee, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1733891215307 2024-12-11T04:26:57,533 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5f6fa41e8d93487ba38ce9b3ceafb846, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=59, earliestPutTs=1733891216463 2024-12-11T04:26:57,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741995_1171 (size=14594) 2024-12-11T04:26:57,546 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0d95822809793edddcee8d4c8425775e#C#compaction#150 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:26:57,547 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/3c1c6bcf3c1245cfb11742d0560a28ab is 50, key is test_row_0/C:col10/1733891216785/Put/seqid=0 2024-12-11T04:26:57,580 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:57,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35454 deadline: 1733891277575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:57,581 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:57,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891277575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:57,582 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:57,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35444 deadline: 1733891277579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:57,585 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:57,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35428 deadline: 1733891277579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:57,588 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:57,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891277585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:57,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741996_1172 (size=12104) 2024-12-11T04:26:57,617 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/3c1c6bcf3c1245cfb11742d0560a28ab as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/3c1c6bcf3c1245cfb11742d0560a28ab 2024-12-11T04:26:57,625 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0d95822809793edddcee8d4c8425775e/C of 0d95822809793edddcee8d4c8425775e into 3c1c6bcf3c1245cfb11742d0560a28ab(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
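[Editor's note] The repeated `RegionTooBusyException: Over memstore limit=512.0 K` warnings above mean the region server is rejecting writes while the region's memstore is over its blocking limit; the built-in client retries eventually surface the rejection back to the caller. Below is a minimal client-side sketch of tolerating that with an application-level backoff. Only the table name, row key, and column family/qualifier are taken from the log; the class name, retry counts, payload, and the shortened `hbase.client.retries.number` setting are illustrative assumptions.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPut {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Illustrative only: shrink the built-in retry budget so a busy region fails fast
        // and the application-level backoff below takes over.
        conf.setInt("hbase.client.retries.number", 3);
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))                  // row key seen in the log
                    .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"),  // family/qualifier from the log keys
                               Bytes.toBytes("value"));                     // payload is a placeholder
            int attempts = 0;
            while (true) {
                try {
                    table.put(put);
                    break;
                } catch (IOException e) {
                    // While the memstore is over its blocking limit the server answers with
                    // RegionTooBusyException; once client retries are exhausted it reaches here.
                    if (++attempts > 10) {
                        throw e;
                    }
                    Thread.sleep(200L * attempts); // linear backoff while the flush catches up
                }
            }
        }
    }
}
```

This mirrors what the test's writer threads are experiencing in the log: writes are throttled until the in-flight flush and compactions reduce the memstore below the blocking limit.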
2024-12-11T04:26:57,625 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0d95822809793edddcee8d4c8425775e: 2024-12-11T04:26:57,625 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e., storeName=0d95822809793edddcee8d4c8425775e/C, priority=13, startTime=1733891217368; duration=0sec 2024-12-11T04:26:57,625 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:57,625 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0d95822809793edddcee8d4c8425775e:C 2024-12-11T04:26:57,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-11T04:26:57,669 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:57,669 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-11T04:26:57,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:26:57,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. as already flushing 2024-12-11T04:26:57,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:26:57,670 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:26:57,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:57,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:57,784 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:57,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35454 deadline: 1733891277783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:57,786 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:57,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891277783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:57,786 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:57,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35444 deadline: 1733891277785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:57,790 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:57,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35428 deadline: 1733891277787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:57,790 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:57,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891277789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:57,822 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:57,823 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-11T04:26:57,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:26:57,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. as already flushing 2024-12-11T04:26:57,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:26:57,823 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
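[Editor's note] The pattern repeating above is the master re-dispatching the remote flush procedure (pid=50); each attempt aborts with "Unable to complete flush ... as already flushing" because FlushRegionCallable refuses to overlap an in-progress flush, and the master keeps retrying until the running flush finishes. For reference, a table-level flush can also be requested directly through the Admin API, as in the hedged sketch below; only the table name comes from the log, and how an overlapping flush request is reconciled depends on the HBase version.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Asks the cluster to flush all regions of the table; under the hood this drives
            // the same FlushRegionCallable procedure that keeps failing in the log above.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```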
2024-12-11T04:26:57,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:57,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:57,874 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/05315822d1914dd6b2847d834ff6891d as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/05315822d1914dd6b2847d834ff6891d 2024-12-11T04:26:57,879 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0d95822809793edddcee8d4c8425775e/B of 0d95822809793edddcee8d4c8425775e into 05315822d1914dd6b2847d834ff6891d(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:26:57,879 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0d95822809793edddcee8d4c8425775e: 2024-12-11T04:26:57,879 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e., storeName=0d95822809793edddcee8d4c8425775e/B, priority=13, startTime=1733891217368; duration=0sec 2024-12-11T04:26:57,879 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:26:57,879 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0d95822809793edddcee8d4c8425775e:B 2024-12-11T04:26:57,936 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:57,940 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121117bd3d8a0e174d80b3aebd815a7bd8e3_0d95822809793edddcee8d4c8425775e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121117bd3d8a0e174d80b3aebd815a7bd8e3_0d95822809793edddcee8d4c8425775e 2024-12-11T04:26:57,942 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/feaf3392aa884218aef7077c446a1852, store: [table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:26:57,942 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/feaf3392aa884218aef7077c446a1852 is 175, key is test_row_0/A:col10/1733891217443/Put/seqid=0 2024-12-11T04:26:57,946 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741997_1173 (size=39549) 2024-12-11T04:26:57,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-11T04:26:57,976 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:57,977 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-11T04:26:57,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:26:57,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. as already flushing 2024-12-11T04:26:57,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:26:57,977 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:57,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:57,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:58,087 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:58,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35454 deadline: 1733891278086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:58,090 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:58,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891278088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:58,091 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:58,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35444 deadline: 1733891278089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:58,093 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:58,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35428 deadline: 1733891278093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:58,094 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:58,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891278093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:58,130 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:58,130 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-11T04:26:58,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 
2024-12-11T04:26:58,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. as already flushing 2024-12-11T04:26:58,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:26:58,131 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:58,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
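[Editor's note] The same "already flushing" failure loop continues here; the underlying pressure is the small blocking memstore limit (512.0 K) this test runs with. In HBase, that blocking limit is derived from `hbase.hregion.memstore.flush.size` multiplied by `hbase.hregion.memstore.block.multiplier`. The sketch below only illustrates that relationship; the values shown are the usual production defaults, not the ones TestAcidGuarantees uses.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimits {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Placeholder values for illustration (typical defaults: 128 MB flush size, multiplier 4).
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        // The per-region blocking limit that triggers RegionTooBusyException is the product.
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("Blocking memstore limit per region: " + blockingLimit + " bytes");
    }
}
```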
2024-12-11T04:26:58,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:58,283 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:58,284 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-11T04:26:58,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:26:58,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 
as already flushing 2024-12-11T04:26:58,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:26:58,284 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:58,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:58,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:58,348 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=81, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/feaf3392aa884218aef7077c446a1852 2024-12-11T04:26:58,357 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/2da7b262b6e64efbbf4fa8aa51ff7347 is 50, key is test_row_0/B:col10/1733891217443/Put/seqid=0 2024-12-11T04:26:58,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741998_1174 (size=12001) 2024-12-11T04:26:58,365 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=81 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/2da7b262b6e64efbbf4fa8aa51ff7347 2024-12-11T04:26:58,375 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/092ca351e067450f89dcf623dbb93f2a is 50, key is test_row_0/C:col10/1733891217443/Put/seqid=0 2024-12-11T04:26:58,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741999_1175 (size=12001) 2024-12-11T04:26:58,439 DEBUG 
[RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:58,441 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-11T04:26:58,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:26:58,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. as already flushing 2024-12-11T04:26:58,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:26:58,442 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:58,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:26:58,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:58,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-11T04:26:58,591 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:58,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35454 deadline: 1733891278589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:58,595 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:58,595 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-11T04:26:58,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:26:58,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. as already flushing 2024-12-11T04:26:58,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:26:58,596 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:26:58,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:58,597 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:58,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891278595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:58,597 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:58,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35428 deadline: 1733891278596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:58,597 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:58,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35444 deadline: 1733891278596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:58,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:26:58,598 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:58,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891278597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:58,750 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:58,750 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-11T04:26:58,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:26:58,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. as already flushing 2024-12-11T04:26:58,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:26:58,751 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:58,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:58,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:26:58,788 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=81 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/092ca351e067450f89dcf623dbb93f2a 2024-12-11T04:26:58,797 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/feaf3392aa884218aef7077c446a1852 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/feaf3392aa884218aef7077c446a1852 2024-12-11T04:26:58,803 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/feaf3392aa884218aef7077c446a1852, entries=200, sequenceid=81, filesize=38.6 K 2024-12-11T04:26:58,804 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/2da7b262b6e64efbbf4fa8aa51ff7347 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/2da7b262b6e64efbbf4fa8aa51ff7347 2024-12-11T04:26:58,811 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/2da7b262b6e64efbbf4fa8aa51ff7347, entries=150, sequenceid=81, 
filesize=11.7 K 2024-12-11T04:26:58,813 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/092ca351e067450f89dcf623dbb93f2a as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/092ca351e067450f89dcf623dbb93f2a 2024-12-11T04:26:58,820 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/092ca351e067450f89dcf623dbb93f2a, entries=150, sequenceid=81, filesize=11.7 K 2024-12-11T04:26:58,823 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for 0d95822809793edddcee8d4c8425775e in 1380ms, sequenceid=81, compaction requested=false 2024-12-11T04:26:58,823 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0d95822809793edddcee8d4c8425775e: 2024-12-11T04:26:58,903 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:58,903 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-11T04:26:58,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 
2024-12-11T04:26:58,904 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2837): Flushing 0d95822809793edddcee8d4c8425775e 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-11T04:26:58,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=A 2024-12-11T04:26:58,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:58,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=B 2024-12-11T04:26:58,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:58,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=C 2024-12-11T04:26:58,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:58,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412114107cb558c8b4611b0c425d54ee5818b_0d95822809793edddcee8d4c8425775e is 50, key is test_row_0/A:col10/1733891217469/Put/seqid=0 2024-12-11T04:26:58,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742000_1176 (size=12154) 2024-12-11T04:26:58,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:58,926 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412114107cb558c8b4611b0c425d54ee5818b_0d95822809793edddcee8d4c8425775e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412114107cb558c8b4611b0c425d54ee5818b_0d95822809793edddcee8d4c8425775e 2024-12-11T04:26:58,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/0bd82e1ebe194af9853c25db2f10dde8, store: [table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:26:58,928 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/0bd82e1ebe194af9853c25db2f10dde8 is 175, key is test_row_0/A:col10/1733891217469/Put/seqid=0 2024-12-11T04:26:58,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742001_1177 (size=30955) 2024-12-11T04:26:58,936 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=99, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/0bd82e1ebe194af9853c25db2f10dde8 2024-12-11T04:26:58,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/ccfed7cc4c514dc5bb7b02d65504bc45 is 50, key is test_row_0/B:col10/1733891217469/Put/seqid=0 2024-12-11T04:26:58,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742002_1178 (size=12001) 2024-12-11T04:26:59,351 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=99 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/ccfed7cc4c514dc5bb7b02d65504bc45 2024-12-11T04:26:59,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/53104bda3cb44bb192909fb999c2a2db is 50, key is test_row_0/C:col10/1733891217469/Put/seqid=0 2024-12-11T04:26:59,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742003_1179 (size=12001) 2024-12-11T04:26:59,373 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=99 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/53104bda3cb44bb192909fb999c2a2db 2024-12-11T04:26:59,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/0bd82e1ebe194af9853c25db2f10dde8 as 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/0bd82e1ebe194af9853c25db2f10dde8 2024-12-11T04:26:59,383 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/0bd82e1ebe194af9853c25db2f10dde8, entries=150, sequenceid=99, filesize=30.2 K 2024-12-11T04:26:59,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/ccfed7cc4c514dc5bb7b02d65504bc45 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/ccfed7cc4c514dc5bb7b02d65504bc45 2024-12-11T04:26:59,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,390 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/ccfed7cc4c514dc5bb7b02d65504bc45, entries=150, sequenceid=99, filesize=11.7 K 2024-12-11T04:26:59,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/53104bda3cb44bb192909fb999c2a2db as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/53104bda3cb44bb192909fb999c2a2db 2024-12-11T04:26:59,395 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/53104bda3cb44bb192909fb999c2a2db, entries=150, sequenceid=99, filesize=11.7 K 2024-12-11T04:26:59,396 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=0 B/0 for 0d95822809793edddcee8d4c8425775e in 492ms, sequenceid=99, compaction requested=true 
2024-12-11T04:26:59,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2538): Flush status journal for 0d95822809793edddcee8d4c8425775e: 2024-12-11T04:26:59,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:26:59,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=50 2024-12-11T04:26:59,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=50 2024-12-11T04:26:59,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,400 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-12-11T04:26:59,400 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0380 sec 2024-12-11T04:26:59,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,402 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees in 2.0430 sec 2024-12-11T04:26:59,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-11T04:26:59,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:26:59,468 INFO [Thread-752 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 49 completed
2024-12-11T04:26:59,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:26:59,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:26:59,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:26:59,470 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-11T04:26:59,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:26:59,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:26:59,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:26:59,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees
2024-12-11T04:26:59,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:26:59,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:26:59,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:26:59,472 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-11T04:26:59,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:26:59,473 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-11T04:26:59,473 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-11T04:26:59,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:26:59,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51
2024-12-11T04:26:59,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:26:59,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:26:59,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:26:59,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:26:59,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:26:59,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:26:59,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:26:59,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:26:59,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:26:59,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:26:59,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:26:59,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:26:59,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51
2024-12-11T04:26:59,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:26:59,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-11T04:26:59,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-11T04:26:59,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-11T04:26:59,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-11T04:26:59,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
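The handler thread names in these entries (RpcServer.default.FPBQ.Fifo.handler=0..2, queue=0, port=39071) point to a small, fixed pool of RPC handler threads all draining one shared FIFO call queue. The sketch below illustrates only that dispatch shape, assuming a plain BlockingQueue and named worker threads; it is an assumption-level illustration of the pattern, not the real HBase RPC scheduler.

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

// Minimal sketch, not the real HBase RPC scheduler: a fixed number of named handler
// threads drain a single shared FIFO queue, which is the shape implied by thread names
// like "RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071" in the log above.
public class FifoHandlerPool {
    public static void main(String[] args) throws InterruptedException {
        BlockingQueue<Runnable> callQueue = new ArrayBlockingQueue<>(100); // the single "queue=0"
        int handlerCount = 3; // handler=0..2 in the log

        for (int i = 0; i < handlerCount; i++) {
            final int id = i;
            Thread handler = new Thread(() -> {
                try {
                    while (true) {
                        Runnable call = callQueue.take(); // FIFO order across all handlers
                        call.run();
                    }
                } catch (InterruptedException ie) {
                    Thread.currentThread().interrupt();
                }
            }, "RpcServer.default.FPBQ.Fifo.handler=" + id + ",queue=0,port=39071");
            handler.setDaemon(true);
            handler.start();
        }

        // Enqueue a few dummy "RPC calls"; whichever handler takes one first runs it.
        for (int c = 0; c < 5; c++) {
            final int callId = c;
            callQueue.put(() -> System.out.println(
                Thread.currentThread().getName() + " handling call " + callId));
        }
        Thread.sleep(200); // give the daemon handlers time to drain before main exits
    }
}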
2024-12-11T04:26:59,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,625 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:26:59,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,626 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] 
regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-11T04:26:59,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:26:59,626 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2837): Flushing 0d95822809793edddcee8d4c8425775e 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-11T04:26:59,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=A 2024-12-11T04:26:59,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:59,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=B 2024-12-11T04:26:59,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:59,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=C 2024-12-11T04:26:59,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:26:59,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211019dcc6b9cad464db788b57c69f2db76_0d95822809793edddcee8d4c8425775e is 50, key is test_row_0/A:col10/1733891219616/Put/seqid=0 2024-12-11T04:26:59,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 0d95822809793edddcee8d4c8425775e 2024-12-11T04:26:59,652 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 
as already flushing 2024-12-11T04:26:59,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742004_1180 (size=17034) 2024-12-11T04:26:59,659 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,667 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211019dcc6b9cad464db788b57c69f2db76_0d95822809793edddcee8d4c8425775e to 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211019dcc6b9cad464db788b57c69f2db76_0d95822809793edddcee8d4c8425775e 2024-12-11T04:26:59,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/3445aab6619542ee9e4278f4a36e94d3, store: [table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:26:59,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/3445aab6619542ee9e4278f4a36e94d3 is 175, key is test_row_0/A:col10/1733891219616/Put/seqid=0 2024-12-11T04:26:59,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742005_1181 (size=48139) 2024-12-11T04:26:59,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:26:59,759 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:59,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35454 deadline: 1733891279753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:59,760 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:59,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35428 deadline: 1733891279753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:59,763 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:59,764 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:59,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891279759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:59,764 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:59,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35444 deadline: 1733891279760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:59,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891279757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:59,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-11T04:26:59,864 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:59,864 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:59,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35454 deadline: 1733891279861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:59,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35428 deadline: 1733891279862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:59,867 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:59,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891279866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:59,868 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:59,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35444 deadline: 1733891279866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:26:59,868 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:26:59,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891279867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:00,069 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:00,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35454 deadline: 1733891280066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:00,070 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:00,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35428 deadline: 1733891280067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:00,071 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:00,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891280069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:00,071 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:00,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891280069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:00,072 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:00,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35444 deadline: 1733891280069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:00,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-11T04:27:00,096 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=107, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/3445aab6619542ee9e4278f4a36e94d3 2024-12-11T04:27:00,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/fcb1fe402f274755ac2cd971bab76cdf is 50, key is test_row_0/B:col10/1733891219616/Put/seqid=0 2024-12-11T04:27:00,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742006_1182 (size=12001) 2024-12-11T04:27:00,122 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=107 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/fcb1fe402f274755ac2cd971bab76cdf 2024-12-11T04:27:00,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/5387b468400b4091b50b63d58edce9f5 is 50, key is test_row_0/C:col10/1733891219616/Put/seqid=0 2024-12-11T04:27:00,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742007_1183 (size=12001) 2024-12-11T04:27:00,373 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:00,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35454 deadline: 1733891280371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:00,374 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:00,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35428 deadline: 1733891280372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:00,375 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:00,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891280373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:00,375 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:00,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891280374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:00,376 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:00,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35444 deadline: 1733891280375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:00,539 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=107 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/5387b468400b4091b50b63d58edce9f5 2024-12-11T04:27:00,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/3445aab6619542ee9e4278f4a36e94d3 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/3445aab6619542ee9e4278f4a36e94d3 2024-12-11T04:27:00,553 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/3445aab6619542ee9e4278f4a36e94d3, entries=250, sequenceid=107, filesize=47.0 K 2024-12-11T04:27:00,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/fcb1fe402f274755ac2cd971bab76cdf as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/fcb1fe402f274755ac2cd971bab76cdf 2024-12-11T04:27:00,561 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/fcb1fe402f274755ac2cd971bab76cdf, entries=150, sequenceid=107, filesize=11.7 K 2024-12-11T04:27:00,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/5387b468400b4091b50b63d58edce9f5 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/5387b468400b4091b50b63d58edce9f5 2024-12-11T04:27:00,568 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/5387b468400b4091b50b63d58edce9f5, entries=150, sequenceid=107, filesize=11.7 K 2024-12-11T04:27:00,570 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=167.72 KB/171750 for 0d95822809793edddcee8d4c8425775e in 944ms, sequenceid=107, compaction requested=true 2024-12-11T04:27:00,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2538): Flush status journal for 0d95822809793edddcee8d4c8425775e: 2024-12-11T04:27:00,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:00,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-12-11T04:27:00,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=52 2024-12-11T04:27:00,573 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=51 2024-12-11T04:27:00,573 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0980 sec 2024-12-11T04:27:00,574 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees in 1.1030 sec 2024-12-11T04:27:00,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-11T04:27:00,577 INFO [Thread-752 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 51 completed 2024-12-11T04:27:00,578 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:27:00,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees 2024-12-11T04:27:00,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-11T04:27:00,581 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:27:00,581 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:27:00,581 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:27:00,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-11T04:27:00,734 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:00,735 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-11T04:27:00,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:00,735 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2837): Flushing 0d95822809793edddcee8d4c8425775e 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-12-11T04:27:00,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=A 2024-12-11T04:27:00,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:00,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=B 2024-12-11T04:27:00,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:00,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=C 2024-12-11T04:27:00,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:00,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121124374714dad44b2e9dc8612c79b36c91_0d95822809793edddcee8d4c8425775e is 50, key is test_row_0/A:col10/1733891219751/Put/seqid=0 2024-12-11T04:27:00,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742008_1184 (size=12304) 
2024-12-11T04:27:00,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:00,770 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121124374714dad44b2e9dc8612c79b36c91_0d95822809793edddcee8d4c8425775e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121124374714dad44b2e9dc8612c79b36c91_0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:00,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/34c92aabc14541869e1fcdb85393dc55, store: [table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:27:00,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/34c92aabc14541869e1fcdb85393dc55 is 175, key is test_row_0/A:col10/1733891219751/Put/seqid=0 2024-12-11T04:27:00,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742009_1185 (size=31105) 2024-12-11T04:27:00,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:00,878 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. as already flushing 2024-12-11T04:27:00,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-11T04:27:00,884 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:00,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35444 deadline: 1733891280881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:00,884 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:00,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35428 deadline: 1733891280880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:00,885 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:00,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35454 deadline: 1733891280883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:00,887 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:00,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891280884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:00,887 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:00,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891280884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:00,987 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:00,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35428 deadline: 1733891280986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:00,987 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:00,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35454 deadline: 1733891280987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:00,990 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:00,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891280988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:00,991 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:00,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891280988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:01,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-11T04:27:01,190 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:01,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35428 deadline: 1733891281188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:01,191 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:01,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35454 deadline: 1733891281191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:01,195 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:01,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891281193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:01,196 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:01,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891281193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:01,198 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=135, memsize=55.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/34c92aabc14541869e1fcdb85393dc55 2024-12-11T04:27:01,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/a359f058294e4187b85ad1cafe775903 is 50, key is test_row_0/B:col10/1733891219751/Put/seqid=0 2024-12-11T04:27:01,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742010_1186 (size=12151) 2024-12-11T04:27:01,264 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=135 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/a359f058294e4187b85ad1cafe775903 2024-12-11T04:27:01,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/1c796dd712a94fcf8599d73737c7a464 is 50, key is 
test_row_0/C:col10/1733891219751/Put/seqid=0 2024-12-11T04:27:01,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742011_1187 (size=12151) 2024-12-11T04:27:01,305 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=135 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/1c796dd712a94fcf8599d73737c7a464 2024-12-11T04:27:01,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/34c92aabc14541869e1fcdb85393dc55 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/34c92aabc14541869e1fcdb85393dc55 2024-12-11T04:27:01,316 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/34c92aabc14541869e1fcdb85393dc55, entries=150, sequenceid=135, filesize=30.4 K 2024-12-11T04:27:01,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/a359f058294e4187b85ad1cafe775903 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/a359f058294e4187b85ad1cafe775903 2024-12-11T04:27:01,347 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/a359f058294e4187b85ad1cafe775903, entries=150, sequenceid=135, filesize=11.9 K 2024-12-11T04:27:01,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/1c796dd712a94fcf8599d73737c7a464 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/1c796dd712a94fcf8599d73737c7a464 2024-12-11T04:27:01,354 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/1c796dd712a94fcf8599d73737c7a464, entries=150, sequenceid=135, filesize=11.9 K 2024-12-11T04:27:01,356 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 
{event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=40.25 KB/41220 for 0d95822809793edddcee8d4c8425775e in 620ms, sequenceid=135, compaction requested=true 2024-12-11T04:27:01,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2538): Flush status journal for 0d95822809793edddcee8d4c8425775e: 2024-12-11T04:27:01,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:01,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-12-11T04:27:01,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=54 2024-12-11T04:27:01,359 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53 2024-12-11T04:27:01,359 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 777 msec 2024-12-11T04:27:01,361 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees in 782 msec 2024-12-11T04:27:01,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:01,496 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0d95822809793edddcee8d4c8425775e 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-11T04:27:01,497 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=A 2024-12-11T04:27:01,497 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:01,498 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=B 2024-12-11T04:27:01,498 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:01,498 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=C 2024-12-11T04:27:01,498 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:01,524 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121186c99547f8b3402396c908e70fb2d6c0_0d95822809793edddcee8d4c8425775e is 50, key is test_row_0/A:col10/1733891220879/Put/seqid=0 2024-12-11T04:27:01,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742012_1188 (size=14794) 2024-12-11T04:27:01,543 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:01,546 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:01,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891281541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:01,546 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:01,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891281544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:01,550 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121186c99547f8b3402396c908e70fb2d6c0_0d95822809793edddcee8d4c8425775e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121186c99547f8b3402396c908e70fb2d6c0_0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:01,551 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/faa20e9f5c0248b5bd836076bc2294f0, store: [table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:27:01,552 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/faa20e9f5c0248b5bd836076bc2294f0 is 175, key is test_row_0/A:col10/1733891220879/Put/seqid=0 2024-12-11T04:27:01,555 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:01,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35428 deadline: 1733891281547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:01,555 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:01,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35454 deadline: 1733891281547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:01,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742013_1189 (size=39749) 2024-12-11T04:27:01,649 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:01,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891281648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:01,650 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:01,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891281648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:01,657 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:01,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35454 deadline: 1733891281656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:01,657 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:01,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35428 deadline: 1733891281656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:01,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-11T04:27:01,684 INFO [Thread-752 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 53 completed 2024-12-11T04:27:01,686 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:27:01,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees 2024-12-11T04:27:01,688 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:27:01,689 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:27:01,689 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=56, ppid=55, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:27:01,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-11T04:27:01,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-11T04:27:01,841 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:01,841 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-11T04:27:01,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 
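The repeated RegionTooBusyException warnings above all come from HRegion.checkResources rejecting Mutate calls while the region's memstore is above its blocking size, which is why every message carries the same "Over memstore limit=512.0 K". In HBase that blocking size is derived from the region's memstore flush size multiplied by hbase.hregion.memstore.block.multiplier; the sketch below only illustrates that relationship, and the 128 KB flush size and multiplier of 4 are assumed values, not settings read from this test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Illustrative values only: flush a region's memstore at ~128 KB and block
    // new writes once it grows past 4x that size (~512 KB), the point at which
    // HRegion.checkResources starts throwing RegionTooBusyException.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long blockingSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("writes are rejected above ~" + (blockingSize / 1024) + " K");
  }
}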
2024-12-11T04:27:01,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. as already flushing 2024-12-11T04:27:01,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:01,842 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:01,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
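While the region stays over that limit, the incoming Mutate calls keep being rejected, and this second flush request (pid=56) is skipped outright because the region is already flushing, so the callable fails with "Unable to complete flush" and the master records the remote failure just below. On the client side such rejections are normally absorbed by the built-in retry logic (hbase.client.retries.number and hbase.client.pause); the sketch below makes that handling explicit with a manual backoff loop and assumes a setup where the server's RegionTooBusyException actually surfaces to the caller. The table name is taken from this log; everything else is illustrative.

import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public class BusyRegionRetrySketch {
  // Retries a single put with a growing pause when the region reports it is
  // over its blocking memstore limit, giving the in-flight flush time to drain.
  // Assumes client-side retries are configured low enough (or disabled) that
  // RegionTooBusyException reaches this code instead of being retried internally.
  static void putWithRetry(Connection connection, Put put)
      throws IOException, InterruptedException {
    try (Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          return;
        } catch (RegionTooBusyException e) {
          if (attempt >= 5) {
            throw e; // give up after a bounded number of attempts
          }
          Thread.sleep(200L * attempt); // back off while the memstore is flushed
        }
      }
    }
  }
}

A caller would build the Put from Bytes.toBytes(...) row and column values, much like the ~4.7 K mutations being rejected in this log, and reuse the Connection from the flush sketch earlier.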
2024-12-11T04:27:01,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:01,851 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:01,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891281851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:01,852 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:01,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891281851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:01,862 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:01,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35454 deadline: 1733891281858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:01,863 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:01,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35428 deadline: 1733891281858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:01,887 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:01,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35444 deadline: 1733891281885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:01,966 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=147, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/faa20e9f5c0248b5bd836076bc2294f0 2024-12-11T04:27:01,974 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/67130e258d704f0f8120de2945338c61 is 50, key is test_row_0/B:col10/1733891220879/Put/seqid=0 2024-12-11T04:27:01,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-11T04:27:01,995 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:01,995 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-11T04:27:01,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:01,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 
as already flushing 2024-12-11T04:27:01,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:01,996 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:01,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:01,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:02,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742014_1190 (size=12151) 2024-12-11T04:27:02,007 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=147 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/67130e258d704f0f8120de2945338c61 2024-12-11T04:27:02,017 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/09d08f1128e34764b37405d10f895c46 is 50, key is test_row_0/C:col10/1733891220879/Put/seqid=0 2024-12-11T04:27:02,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742015_1191 (size=12151) 2024-12-11T04:27:02,023 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=147 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/09d08f1128e34764b37405d10f895c46 2024-12-11T04:27:02,027 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/faa20e9f5c0248b5bd836076bc2294f0 as 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/faa20e9f5c0248b5bd836076bc2294f0 2024-12-11T04:27:02,031 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/faa20e9f5c0248b5bd836076bc2294f0, entries=200, sequenceid=147, filesize=38.8 K 2024-12-11T04:27:02,033 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/67130e258d704f0f8120de2945338c61 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/67130e258d704f0f8120de2945338c61 2024-12-11T04:27:02,038 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/67130e258d704f0f8120de2945338c61, entries=150, sequenceid=147, filesize=11.9 K 2024-12-11T04:27:02,039 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/09d08f1128e34764b37405d10f895c46 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/09d08f1128e34764b37405d10f895c46 2024-12-11T04:27:02,044 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/09d08f1128e34764b37405d10f895c46, entries=150, sequenceid=147, filesize=11.9 K 2024-12-11T04:27:02,045 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 0d95822809793edddcee8d4c8425775e in 549ms, sequenceid=147, compaction requested=true 2024-12-11T04:27:02,045 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0d95822809793edddcee8d4c8425775e: 2024-12-11T04:27:02,045 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0d95822809793edddcee8d4c8425775e:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:27:02,045 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:02,045 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking 2024-12-11T04:27:02,045 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking 2024-12-11T04:27:02,046 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0d95822809793edddcee8d4c8425775e:B, priority=-2147483648, current under compaction 
store size is 2 2024-12-11T04:27:02,046 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:02,046 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0d95822809793edddcee8d4c8425775e:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:27:02,046 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:27:02,048 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 72409 starting at candidate #0 after considering 10 permutations with 10 in ratio 2024-12-11T04:27:02,048 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): 0d95822809793edddcee8d4c8425775e/B is initiating minor compaction (all files) 2024-12-11T04:27:02,049 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0d95822809793edddcee8d4c8425775e/B in TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:02,049 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/05315822d1914dd6b2847d834ff6891d, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/2da7b262b6e64efbbf4fa8aa51ff7347, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/ccfed7cc4c514dc5bb7b02d65504bc45, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/fcb1fe402f274755ac2cd971bab76cdf, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/a359f058294e4187b85ad1cafe775903, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/67130e258d704f0f8120de2945338c61] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp, totalSize=70.7 K 2024-12-11T04:27:02,049 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 220555 starting at candidate #0 after considering 10 permutations with 10 in ratio 2024-12-11T04:27:02,049 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): 0d95822809793edddcee8d4c8425775e/A is initiating minor compaction (all files) 2024-12-11T04:27:02,049 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0d95822809793edddcee8d4c8425775e/A in TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 
2024-12-11T04:27:02,049 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/efd67f94c2d64d3a9721e28395bc75e7, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/feaf3392aa884218aef7077c446a1852, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/0bd82e1ebe194af9853c25db2f10dde8, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/3445aab6619542ee9e4278f4a36e94d3, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/34c92aabc14541869e1fcdb85393dc55, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/faa20e9f5c0248b5bd836076bc2294f0] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp, totalSize=215.4 K 2024-12-11T04:27:02,049 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=10 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:02,050 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 
files: [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/efd67f94c2d64d3a9721e28395bc75e7, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/feaf3392aa884218aef7077c446a1852, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/0bd82e1ebe194af9853c25db2f10dde8, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/3445aab6619542ee9e4278f4a36e94d3, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/34c92aabc14541869e1fcdb85393dc55, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/faa20e9f5c0248b5bd836076bc2294f0] 2024-12-11T04:27:02,050 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 05315822d1914dd6b2847d834ff6891d, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=59, earliestPutTs=1733891216463 2024-12-11T04:27:02,050 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting efd67f94c2d64d3a9721e28395bc75e7, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=59, earliestPutTs=1733891216463 2024-12-11T04:27:02,050 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 2da7b262b6e64efbbf4fa8aa51ff7347, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1733891216824 2024-12-11T04:27:02,051 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting feaf3392aa884218aef7077c446a1852, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1733891216819 2024-12-11T04:27:02,051 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting ccfed7cc4c514dc5bb7b02d65504bc45, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=99, earliestPutTs=1733891217468 2024-12-11T04:27:02,052 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0bd82e1ebe194af9853c25db2f10dde8, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=99, earliestPutTs=1733891217468 2024-12-11T04:27:02,052 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting fcb1fe402f274755ac2cd971bab76cdf, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=107, earliestPutTs=1733891219616 2024-12-11T04:27:02,052 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3445aab6619542ee9e4278f4a36e94d3, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=107, earliestPutTs=1733891219616 2024-12-11T04:27:02,053 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 34c92aabc14541869e1fcdb85393dc55, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733891219751 2024-12-11T04:27:02,053 DEBUG 
[RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting a359f058294e4187b85ad1cafe775903, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733891219751 2024-12-11T04:27:02,053 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting faa20e9f5c0248b5bd836076bc2294f0, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=147, earliestPutTs=1733891220879 2024-12-11T04:27:02,053 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 67130e258d704f0f8120de2945338c61, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=147, earliestPutTs=1733891220879 2024-12-11T04:27:02,081 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:27:02,090 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0d95822809793edddcee8d4c8425775e#B#compaction#166 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:27:02,091 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/d5b3bdff986c4f6f9d72c84280dee44b is 50, key is test_row_0/B:col10/1733891220879/Put/seqid=0 2024-12-11T04:27:02,091 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412110977f9069e0a4028ae60580765d9925e_0d95822809793edddcee8d4c8425775e store=[table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:27:02,096 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412110977f9069e0a4028ae60580765d9925e_0d95822809793edddcee8d4c8425775e, store=[table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:27:02,096 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412110977f9069e0a4028ae60580765d9925e_0d95822809793edddcee8d4c8425775e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:27:02,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742016_1192 (size=12459) 2024-12-11T04:27:02,123 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/d5b3bdff986c4f6f9d72c84280dee44b as 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/d5b3bdff986c4f6f9d72c84280dee44b 2024-12-11T04:27:02,128 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in 0d95822809793edddcee8d4c8425775e/B of 0d95822809793edddcee8d4c8425775e into d5b3bdff986c4f6f9d72c84280dee44b(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:27:02,128 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0d95822809793edddcee8d4c8425775e: 2024-12-11T04:27:02,128 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e., storeName=0d95822809793edddcee8d4c8425775e/B, priority=10, startTime=1733891222045; duration=0sec 2024-12-11T04:27:02,128 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:27:02,128 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0d95822809793edddcee8d4c8425775e:B 2024-12-11T04:27:02,128 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking 2024-12-11T04:27:02,130 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 72409 starting at candidate #0 after considering 10 permutations with 10 in ratio 2024-12-11T04:27:02,130 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): 0d95822809793edddcee8d4c8425775e/C is initiating minor compaction (all files) 2024-12-11T04:27:02,130 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0d95822809793edddcee8d4c8425775e/C in TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 
2024-12-11T04:27:02,130 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/3c1c6bcf3c1245cfb11742d0560a28ab, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/092ca351e067450f89dcf623dbb93f2a, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/53104bda3cb44bb192909fb999c2a2db, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/5387b468400b4091b50b63d58edce9f5, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/1c796dd712a94fcf8599d73737c7a464, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/09d08f1128e34764b37405d10f895c46] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp, totalSize=70.7 K 2024-12-11T04:27:02,131 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 3c1c6bcf3c1245cfb11742d0560a28ab, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=59, earliestPutTs=1733891216463 2024-12-11T04:27:02,132 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 092ca351e067450f89dcf623dbb93f2a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1733891216824 2024-12-11T04:27:02,133 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 53104bda3cb44bb192909fb999c2a2db, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=99, earliestPutTs=1733891217468 2024-12-11T04:27:02,133 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 5387b468400b4091b50b63d58edce9f5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=107, earliestPutTs=1733891219616 2024-12-11T04:27:02,133 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 1c796dd712a94fcf8599d73737c7a464, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1733891219751 2024-12-11T04:27:02,134 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 09d08f1128e34764b37405d10f895c46, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=147, earliestPutTs=1733891220879 2024-12-11T04:27:02,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742017_1193 (size=4469) 2024-12-11T04:27:02,147 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0d95822809793edddcee8d4c8425775e#A#compaction#165 average throughput is 0.37 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:27:02,148 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/676e671cdce5456eb78f84e85cc07a24 is 175, key is test_row_0/A:col10/1733891220879/Put/seqid=0 2024-12-11T04:27:02,148 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:02,149 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-11T04:27:02,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:02,149 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2837): Flushing 0d95822809793edddcee8d4c8425775e 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-11T04:27:02,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=A 2024-12-11T04:27:02,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:02,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=B 2024-12-11T04:27:02,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:02,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=C 2024-12-11T04:27:02,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:02,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:02,156 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. as already flushing 2024-12-11T04:27:02,167 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0d95822809793edddcee8d4c8425775e#C#compaction#167 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:27:02,168 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/7075cfeae58a49a7a8b1bb23cda171f7 is 50, key is test_row_0/C:col10/1733891220879/Put/seqid=0 2024-12-11T04:27:02,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412117784dafe06a6450fb6eb4ff440b17e67_0d95822809793edddcee8d4c8425775e is 50, key is test_row_0/A:col10/1733891221541/Put/seqid=0 2024-12-11T04:27:02,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742018_1194 (size=31413) 2024-12-11T04:27:02,176 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:02,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35454 deadline: 1733891282167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:02,176 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:02,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891282168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:02,177 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:02,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35428 deadline: 1733891282169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:02,177 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:02,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891282174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:02,184 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/676e671cdce5456eb78f84e85cc07a24 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/676e671cdce5456eb78f84e85cc07a24 2024-12-11T04:27:02,189 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in 0d95822809793edddcee8d4c8425775e/A of 0d95822809793edddcee8d4c8425775e into 676e671cdce5456eb78f84e85cc07a24(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T04:27:02,189 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0d95822809793edddcee8d4c8425775e: 2024-12-11T04:27:02,189 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e., storeName=0d95822809793edddcee8d4c8425775e/A, priority=10, startTime=1733891222045; duration=0sec 2024-12-11T04:27:02,189 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:02,189 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0d95822809793edddcee8d4c8425775e:A 2024-12-11T04:27:02,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742019_1195 (size=12459) 2024-12-11T04:27:02,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742020_1196 (size=12304) 2024-12-11T04:27:02,214 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/7075cfeae58a49a7a8b1bb23cda171f7 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/7075cfeae58a49a7a8b1bb23cda171f7 2024-12-11T04:27:02,220 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in 0d95822809793edddcee8d4c8425775e/C of 0d95822809793edddcee8d4c8425775e into 7075cfeae58a49a7a8b1bb23cda171f7(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:27:02,220 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0d95822809793edddcee8d4c8425775e: 2024-12-11T04:27:02,220 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e., storeName=0d95822809793edddcee8d4c8425775e/C, priority=10, startTime=1733891222046; duration=0sec 2024-12-11T04:27:02,220 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:02,220 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0d95822809793edddcee8d4c8425775e:C 2024-12-11T04:27:02,278 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:02,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891282278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:02,279 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:02,279 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:02,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35428 deadline: 1733891282278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:02,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891282279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:02,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-11T04:27:02,480 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:02,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891282480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:02,482 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:02,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891282481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:02,483 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:02,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35428 deadline: 1733891282482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:02,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,614 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412117784dafe06a6450fb6eb4ff440b17e67_0d95822809793edddcee8d4c8425775e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412117784dafe06a6450fb6eb4ff440b17e67_0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:02,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/690c535e3c8f4f428d3de00a6875626e, store: [table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:27:02,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/690c535e3c8f4f428d3de00a6875626e is 175, key is test_row_0/A:col10/1733891221541/Put/seqid=0 2024-12-11T04:27:02,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742021_1197 (size=31105) 2024-12-11T04:27:02,648 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=173, memsize=49.2 K, 
hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/690c535e3c8f4f428d3de00a6875626e 2024-12-11T04:27:02,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/407d671bf8f24bf89711f70c89e40c1a is 50, key is test_row_0/B:col10/1733891221541/Put/seqid=0 2024-12-11T04:27:02,682 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:02,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35454 deadline: 1733891282679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:02,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742022_1198 (size=12151) 2024-12-11T04:27:02,690 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/407d671bf8f24bf89711f70c89e40c1a 2024-12-11T04:27:02,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/9763c4c0977c4debaa9bb043fc73f005 is 50, key is 
test_row_0/C:col10/1733891221541/Put/seqid=0 2024-12-11T04:27:02,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742023_1199 (size=12151) 2024-12-11T04:27:02,751 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/9763c4c0977c4debaa9bb043fc73f005 2024-12-11T04:27:02,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/690c535e3c8f4f428d3de00a6875626e as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/690c535e3c8f4f428d3de00a6875626e 2024-12-11T04:27:02,764 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/690c535e3c8f4f428d3de00a6875626e, entries=150, sequenceid=173, filesize=30.4 K 2024-12-11T04:27:02,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/407d671bf8f24bf89711f70c89e40c1a as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/407d671bf8f24bf89711f70c89e40c1a 2024-12-11T04:27:02,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,772 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/407d671bf8f24bf89711f70c89e40c1a, entries=150, sequenceid=173, filesize=11.9 K 2024-12-11T04:27:02,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/9763c4c0977c4debaa9bb043fc73f005 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/9763c4c0977c4debaa9bb043fc73f005 2024-12-11T04:27:02,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,781 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/9763c4c0977c4debaa9bb043fc73f005, entries=150, sequenceid=173, filesize=11.9 K 2024-12-11T04:27:02,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-11T04:27:02,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,782 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 0d95822809793edddcee8d4c8425775e in 633ms, sequenceid=173, compaction requested=false 2024-12-11T04:27:02,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2538): Flush status journal for 0d95822809793edddcee8d4c8425775e: 2024-12-11T04:27:02,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:02,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=56 2024-12-11T04:27:02,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=56 2024-12-11T04:27:02,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,786 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=56, resume processing ppid=55 2024-12-11T04:27:02,786 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, ppid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0950 sec 2024-12-11T04:27:02,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,788 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees in 1.1010 sec 2024-12-11T04:27:02,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,789 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,793 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-11T04:27:02,793 INFO [Thread-752 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 55 completed 2024-12-11T04:27:02,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,795 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:27:02,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees 2024-12-11T04:27:02,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,796 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 
2024-12-11T04:27:02,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-11T04:27:02,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,797 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:27:02,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,797 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:27:02,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:02,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,811 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0d95822809793edddcee8d4c8425775e 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-11T04:27:02,811 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=A 2024-12-11T04:27:02,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:02,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=B 2024-12-11T04:27:02,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:02,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=C 2024-12-11T04:27:02,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:02,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,830 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211f2acef871c17416b88e35319e9db1f13_0d95822809793edddcee8d4c8425775e is 50, key is test_row_0/A:col10/1733891222159/Put/seqid=0 2024-12-11T04:27:02,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742024_1200 (size=12304) 2024-12-11T04:27:02,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
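The repeated DEBUG entries from storefiletracker.StoreFileTrackerFactory(122) show the factory instantiating DefaultStoreFileTracker over and over; that is the implementation handed out when no alternative tracker is configured for the table. As an illustrative sketch only (not part of the log), this is roughly how a table could opt into a different tracker; the config key hbase.store.file-tracker.impl and the value FILE are assumptions inferred from the factory naming above, not taken from this output.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreFileTrackerConfigSketch {
  public static TableDescriptor withFileBasedTracker() {
    // Assumed config key/value; DefaultStoreFileTracker is what the log shows
    // when nothing is set. "FILE" would select a file-based tracker instead.
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setValue("hbase.store.file-tracker.impl", "FILE")
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("A")))
        .build();
  }
}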
2024-12-11T04:27:02,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,854 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,860 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,860 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211f2acef871c17416b88e35319e9db1f13_0d95822809793edddcee8d4c8425775e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211f2acef871c17416b88e35319e9db1f13_0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:02,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,862 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/c81931684f2e4b9caeeeb5328e37963f, store: [table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:27:02,863 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/c81931684f2e4b9caeeeb5328e37963f is 175, key is test_row_0/A:col10/1733891222159/Put/seqid=0 2024-12-11T04:27:02,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:02,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742025_1201 (size=31101) 2024-12-11T04:27:02,874 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:02,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35428 deadline: 1733891282872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:02,875 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:02,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891282872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:02,875 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:02,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891282873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:02,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-11T04:27:02,948 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:02,948 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-11T04:27:02,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:02,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. as already flushing 2024-12-11T04:27:02,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:02,949 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
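The WARN/DEBUG pairs above show Mutate calls on region 0d95822809793edddcee8d4c8425775e being rejected with RegionTooBusyException ("Over memstore limit=512.0 K") while MemStoreFlusher.0 is still writing out stores A, B and C: HRegion.checkResources blocks new writes once the memstore reaches its blocking size (flush size times block multiplier; that it was tuned down to 512 KB for this test is an inference, not stated in the log). A minimal client-side sketch of retrying such a put follows; in normal use the HBase client retries internally and may surface the error wrapped in a retries-exhausted exception, so catching RegionTooBusyException directly assumes client retries are effectively disabled. Table, row and column names are taken from the log; the backoff values are arbitrary.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class TooBusyRetrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;                 // arbitrary starting backoff
      for (int attempt = 0; attempt < 10; attempt++) {
        try {
          table.put(put);
          return;                           // write accepted
        } catch (RegionTooBusyException e) {
          // Region is blocking writes while its memstore flush finishes
          // (the "Over memstore limit" condition seen in the log).
          Thread.sleep(backoffMs);
          backoffMs = Math.min(backoffMs * 2, 5_000);
        }
      }
      throw new IllegalStateException("region stayed too busy after retries");
    }
  }
}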
2024-12-11T04:27:02,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:02,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:02,976 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:02,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35428 deadline: 1733891282976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:02,976 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:02,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891282976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:02,977 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:02,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891282977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:03,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-11T04:27:03,101 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:03,102 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-11T04:27:03,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:03,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. as already flushing 2024-12-11T04:27:03,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:03,102 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:27:03,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:03,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:03,180 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:03,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891283179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:03,180 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:03,180 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:03,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35428 deadline: 1733891283179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:03,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891283180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:03,254 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:03,255 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-11T04:27:03,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 
2024-12-11T04:27:03,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. as already flushing 2024-12-11T04:27:03,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:03,255 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:03,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:27:03,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
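pid=58 is a master-driven flush procedure: the master dispatches FlushRegionCallable to 5f466b3719ec,39071, the region server replies "NOT flushing ... as already flushing", the callable fails with "Unable to complete flush", and the master logs "Remote procedure failed" and re-dispatches, which is why the same stack trace repeats every hundred-odd milliseconds until the in-flight flush finishes. From client code the equivalent request is simply Admin#flush; a minimal sketch follows (table name taken from the log; the intermediate retries are assumed to stay inside the master and are not visible to the caller in this output).

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table. If a region is
      // already flushing (as in the log), the flush procedure is retried by
      // the master until the in-flight flush completes.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}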
2024-12-11T04:27:03,271 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=187, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/c81931684f2e4b9caeeeb5328e37963f 2024-12-11T04:27:03,284 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/7ea9fa8655844084a613a6acb36e6e25 is 50, key is test_row_0/B:col10/1733891222159/Put/seqid=0 2024-12-11T04:27:03,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742026_1202 (size=9757) 2024-12-11T04:27:03,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-11T04:27:03,408 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:03,408 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-11T04:27:03,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:03,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. as already flushing 2024-12-11T04:27:03,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:03,409 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
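The HMobStore rename into mobdir/data/default/TestAcidGuarantees/... and the "Mob store is flushed" line show that family A is MOB-enabled, so DefaultMobStoreFlusher writes qualifying cells into separate MOB files rather than inline in the store file. For reference, a hedged sketch of declaring such a family is below; the threshold value is an arbitrary illustration and is not stated in the log.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilySketch {
  public static TableDescriptor mobTable() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
            .setMobEnabled(true)       // cells go through HMobStore / DefaultMobStoreFlusher
            .setMobThreshold(100L)     // illustrative threshold in bytes (assumed)
            .build())
        .build();
  }
}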
2024-12-11T04:27:03,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:03,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:03,482 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:03,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35428 deadline: 1733891283481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:03,483 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:03,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891283483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:03,487 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:03,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891283484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:03,561 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:03,561 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-11T04:27:03,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 
2024-12-11T04:27:03,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. as already flushing 2024-12-11T04:27:03,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:03,562 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:03,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:27:03,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:03,686 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:03,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35454 deadline: 1733891283685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:03,689 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=187 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/7ea9fa8655844084a613a6acb36e6e25 2024-12-11T04:27:03,705 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/539ebc8558c74a2db98c9dd0d529cfaa is 50, key is test_row_0/C:col10/1733891222159/Put/seqid=0 2024-12-11T04:27:03,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742027_1203 (size=9757) 2024-12-11T04:27:03,714 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:03,714 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-11T04:27:03,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:03,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. as already flushing 2024-12-11T04:27:03,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:03,715 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:03,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:03,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:03,867 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:03,867 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-11T04:27:03,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:03,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. as already flushing 2024-12-11T04:27:03,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:03,868 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:03,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:03,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:03,895 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:03,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35444 deadline: 1733891283895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:03,897 DEBUG [Thread-748 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4137 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e., hostname=5f466b3719ec,39071,1733891180267, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T04:27:03,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-11T04:27:03,985 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:03,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891283984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:03,988 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:03,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35428 deadline: 1733891283987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:03,992 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:03,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891283992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:04,020 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:04,021 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-11T04:27:04,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:04,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. as already flushing 2024-12-11T04:27:04,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:04,021 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:04,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:04,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:04,114 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=187 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/539ebc8558c74a2db98c9dd0d529cfaa 2024-12-11T04:27:04,120 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/c81931684f2e4b9caeeeb5328e37963f as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/c81931684f2e4b9caeeeb5328e37963f 2024-12-11T04:27:04,126 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/c81931684f2e4b9caeeeb5328e37963f, entries=150, sequenceid=187, filesize=30.4 K 2024-12-11T04:27:04,128 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/7ea9fa8655844084a613a6acb36e6e25 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/7ea9fa8655844084a613a6acb36e6e25 2024-12-11T04:27:04,133 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/7ea9fa8655844084a613a6acb36e6e25, entries=100, 
sequenceid=187, filesize=9.5 K 2024-12-11T04:27:04,134 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/539ebc8558c74a2db98c9dd0d529cfaa as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/539ebc8558c74a2db98c9dd0d529cfaa 2024-12-11T04:27:04,139 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/539ebc8558c74a2db98c9dd0d529cfaa, entries=100, sequenceid=187, filesize=9.5 K 2024-12-11T04:27:04,140 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 0d95822809793edddcee8d4c8425775e in 1329ms, sequenceid=187, compaction requested=true 2024-12-11T04:27:04,140 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0d95822809793edddcee8d4c8425775e: 2024-12-11T04:27:04,140 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0d95822809793edddcee8d4c8425775e:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:27:04,140 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:27:04,140 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:04,140 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0d95822809793edddcee8d4c8425775e:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:27:04,141 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:04,140 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:27:04,141 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:27:04,141 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): 0d95822809793edddcee8d4c8425775e/A is initiating minor compaction (all files) 2024-12-11T04:27:04,141 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0d95822809793edddcee8d4c8425775e/A in TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 
2024-12-11T04:27:04,141 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0d95822809793edddcee8d4c8425775e:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:27:04,142 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/676e671cdce5456eb78f84e85cc07a24, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/690c535e3c8f4f428d3de00a6875626e, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/c81931684f2e4b9caeeeb5328e37963f] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp, totalSize=91.4 K 2024-12-11T04:27:04,142 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:04,142 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. files: [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/676e671cdce5456eb78f84e85cc07a24, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/690c535e3c8f4f428d3de00a6875626e, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/c81931684f2e4b9caeeeb5328e37963f] 2024-12-11T04:27:04,142 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34367 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:27:04,142 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): 0d95822809793edddcee8d4c8425775e/B is initiating minor compaction (all files) 2024-12-11T04:27:04,142 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0d95822809793edddcee8d4c8425775e/B in TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 
2024-12-11T04:27:04,142 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:27:04,142 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/d5b3bdff986c4f6f9d72c84280dee44b, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/407d671bf8f24bf89711f70c89e40c1a, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/7ea9fa8655844084a613a6acb36e6e25] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp, totalSize=33.6 K 2024-12-11T04:27:04,143 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 676e671cdce5456eb78f84e85cc07a24, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=147, earliestPutTs=1733891220879 2024-12-11T04:27:04,143 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting d5b3bdff986c4f6f9d72c84280dee44b, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=147, earliestPutTs=1733891220879 2024-12-11T04:27:04,143 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 690c535e3c8f4f428d3de00a6875626e, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1733891221535 2024-12-11T04:27:04,143 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 407d671bf8f24bf89711f70c89e40c1a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1733891221535 2024-12-11T04:27:04,144 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting c81931684f2e4b9caeeeb5328e37963f, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=187, earliestPutTs=1733891222159 2024-12-11T04:27:04,144 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 7ea9fa8655844084a613a6acb36e6e25, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=187, earliestPutTs=1733891222159 2024-12-11T04:27:04,156 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:27:04,158 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0d95822809793edddcee8d4c8425775e#B#compaction#175 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:27:04,158 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/813a4c1ccbda4899943d73499c079d96 is 50, key is test_row_0/B:col10/1733891222159/Put/seqid=0 2024-12-11T04:27:04,164 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121160394f483fb14eeeb7c369205a010ead_0d95822809793edddcee8d4c8425775e store=[table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:27:04,167 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121160394f483fb14eeeb7c369205a010ead_0d95822809793edddcee8d4c8425775e, store=[table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:27:04,167 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121160394f483fb14eeeb7c369205a010ead_0d95822809793edddcee8d4c8425775e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:27:04,174 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:04,175 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-11T04:27:04,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 
2024-12-11T04:27:04,175 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2837): Flushing 0d95822809793edddcee8d4c8425775e 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-11T04:27:04,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=A 2024-12-11T04:27:04,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:04,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=B 2024-12-11T04:27:04,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:04,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=C 2024-12-11T04:27:04,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:04,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742028_1204 (size=12561) 2024-12-11T04:27:04,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742029_1205 (size=4469) 2024-12-11T04:27:04,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211691e60cfdbac46dfb838ae690aa0c3cd_0d95822809793edddcee8d4c8425775e is 50, key is test_row_0/A:col10/1733891222871/Put/seqid=0 2024-12-11T04:27:04,217 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0d95822809793edddcee8d4c8425775e#A#compaction#174 average throughput is 0.40 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:27:04,218 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/875f8896158a4b529220b5ea2da4649d is 175, key is test_row_0/A:col10/1733891222159/Put/seqid=0 2024-12-11T04:27:04,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742030_1206 (size=12304) 2024-12-11T04:27:04,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742031_1207 (size=31622) 2024-12-11T04:27:04,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,253 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/875f8896158a4b529220b5ea2da4649d as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/875f8896158a4b529220b5ea2da4649d 2024-12-11T04:27:04,254 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211691e60cfdbac46dfb838ae690aa0c3cd_0d95822809793edddcee8d4c8425775e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211691e60cfdbac46dfb838ae690aa0c3cd_0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:04,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/836e6d75a39148a68ad6b249dc731a35, store: [table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:27:04,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/836e6d75a39148a68ad6b249dc731a35 is 175, key is test_row_0/A:col10/1733891222871/Put/seqid=0 2024-12-11T04:27:04,262 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0d95822809793edddcee8d4c8425775e/A of 0d95822809793edddcee8d4c8425775e into 875f8896158a4b529220b5ea2da4649d(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T04:27:04,262 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0d95822809793edddcee8d4c8425775e: 2024-12-11T04:27:04,262 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e., storeName=0d95822809793edddcee8d4c8425775e/A, priority=13, startTime=1733891224140; duration=0sec 2024-12-11T04:27:04,263 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:27:04,263 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0d95822809793edddcee8d4c8425775e:A 2024-12-11T04:27:04,263 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:27:04,264 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34367 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:27:04,264 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): 0d95822809793edddcee8d4c8425775e/C is initiating minor compaction (all files) 2024-12-11T04:27:04,265 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0d95822809793edddcee8d4c8425775e/C in TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:04,265 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/7075cfeae58a49a7a8b1bb23cda171f7, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/9763c4c0977c4debaa9bb043fc73f005, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/539ebc8558c74a2db98c9dd0d529cfaa] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp, totalSize=33.6 K 2024-12-11T04:27:04,266 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7075cfeae58a49a7a8b1bb23cda171f7, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=147, earliestPutTs=1733891220879 2024-12-11T04:27:04,266 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9763c4c0977c4debaa9bb043fc73f005, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1733891221535 2024-12-11T04:27:04,266 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 539ebc8558c74a2db98c9dd0d529cfaa, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=187, earliestPutTs=1733891222159 2024-12-11T04:27:04,293 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 0d95822809793edddcee8d4c8425775e#C#compaction#177 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:27:04,295 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/56279eb499d6407b86cb4059082d6634 is 50, key is test_row_0/C:col10/1733891222159/Put/seqid=0 2024-12-11T04:27:04,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742032_1208 (size=31105) 2024-12-11T04:27:04,318 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=211, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/836e6d75a39148a68ad6b249dc731a35 2024-12-11T04:27:04,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742033_1209 (size=12561) 2024-12-11T04:27:04,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/66228bec15bd4b30920ecb4a7a2b3d31 is 50, key is test_row_0/B:col10/1733891222871/Put/seqid=0 2024-12-11T04:27:04,349 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/56279eb499d6407b86cb4059082d6634 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/56279eb499d6407b86cb4059082d6634 2024-12-11T04:27:04,355 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0d95822809793edddcee8d4c8425775e/C of 0d95822809793edddcee8d4c8425775e into 56279eb499d6407b86cb4059082d6634(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T04:27:04,355 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0d95822809793edddcee8d4c8425775e: 2024-12-11T04:27:04,355 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e., storeName=0d95822809793edddcee8d4c8425775e/C, priority=13, startTime=1733891224141; duration=0sec 2024-12-11T04:27:04,356 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:04,356 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0d95822809793edddcee8d4c8425775e:C 2024-12-11T04:27:04,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742034_1210 (size=12151) 2024-12-11T04:27:04,595 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/813a4c1ccbda4899943d73499c079d96 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/813a4c1ccbda4899943d73499c079d96 2024-12-11T04:27:04,601 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0d95822809793edddcee8d4c8425775e/B of 0d95822809793edddcee8d4c8425775e into 813a4c1ccbda4899943d73499c079d96(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T04:27:04,601 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0d95822809793edddcee8d4c8425775e: 2024-12-11T04:27:04,602 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e., storeName=0d95822809793edddcee8d4c8425775e/B, priority=13, startTime=1733891224140; duration=0sec 2024-12-11T04:27:04,602 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:04,602 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0d95822809793edddcee8d4c8425775e:B 2024-12-11T04:27:04,792 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/66228bec15bd4b30920ecb4a7a2b3d31 2024-12-11T04:27:04,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/a83cddd1dbb4413a9f6614e490b82972 is 50, key is test_row_0/C:col10/1733891222871/Put/seqid=0 2024-12-11T04:27:04,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742035_1211 (size=12151) 2024-12-11T04:27:04,818 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/a83cddd1dbb4413a9f6614e490b82972 2024-12-11T04:27:04,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/836e6d75a39148a68ad6b249dc731a35 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/836e6d75a39148a68ad6b249dc731a35 2024-12-11T04:27:04,831 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/836e6d75a39148a68ad6b249dc731a35, entries=150, sequenceid=211, filesize=30.4 K 2024-12-11T04:27:04,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,833 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:27:04,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/66228bec15bd4b30920ecb4a7a2b3d31 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/66228bec15bd4b30920ecb4a7a2b3d31
2024-12-11T04:27:04,838 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/66228bec15bd4b30920ecb4a7a2b3d31, entries=150, sequenceid=211, filesize=11.9 K
2024-12-11T04:27:04,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/a83cddd1dbb4413a9f6614e490b82972 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/a83cddd1dbb4413a9f6614e490b82972
2024-12-11T04:27:04,847 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/a83cddd1dbb4413a9f6614e490b82972, entries=150, sequenceid=211, filesize=11.9 K
2024-12-11T04:27:04,848 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=0 B/0 for 0d95822809793edddcee8d4c8425775e in 673ms, sequenceid=211, compaction requested=false
2024-12-11T04:27:04,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2538): Flush status journal for 0d95822809793edddcee8d4c8425775e:
2024-12-11T04:27:04,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.
2024-12-11T04:27:04,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=58
2024-12-11T04:27:04,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=58
2024-12-11T04:27:04,852 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57
2024-12-11T04:27:04,852 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0530 sec
2024-12-11T04:27:04,854 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees in 2.0570 sec
2024-12-11T04:27:04,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] 
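The DEBUG message above (repeated for every store the RPC handlers touch) records StoreFileTrackerFactory resolving the tracker implementation and instantiating DefaultStoreFileTracker. As a hedged illustration only — the configuration key "hbase.store.file-tracker.impl" and the value "DEFAULT" are assumptions about the store-file-tracking feature, not something stated in this log — a test configuration could pin that choice roughly like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class StoreFileTrackerConfigSketch {
  public static void main(String[] args) {
    // Hypothetical sketch: assumes "hbase.store.file-tracker.impl" is the key
    // consulted by StoreFileTrackerFactory and that "DEFAULT" selects the
    // DefaultStoreFileTracker instantiated repeatedly in the surrounding log.
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.store.file-tracker.impl", "DEFAULT");
    System.out.println("tracker impl = " + conf.get("hbase.store.file-tracker.impl"));
  }
}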
2024-12-11T04:27:04,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57
2024-12-11T04:27:04,901 INFO [Thread-752 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 57 completed
2024-12-11T04:27:04,903 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-11T04:27:04,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees
2024-12-11T04:27:04,905 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-11T04:27:04,905 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-11T04:27:04,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59
2024-12-11T04:27:04,905 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
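The INFO/DEBUG lines just above trace a client-requested flush of default:TestAcidGuarantees: the admin call for procId 57 completes, the master accepts a new flush request, stores FlushTableProcedure pid=59, walks it through FLUSH_TABLE_PREPARE and FLUSH_TABLE_FLUSH_REGIONS, and spawns a FlushRegionProcedure subprocedure (pid=60). A minimal sketch of issuing such a flush from the client side with the public Admin API follows; the connection setup is illustrative and not taken from the test harness itself.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    // Illustrative only: reads hbase-site.xml from the classpath.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Admin.flush(...) asks the master to flush the table; the master-side
      // counterpart is the FlushTableProcedure/FlushRegionProcedure pair
      // visible as pid=59/pid=60 in the log above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}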
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:04,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 
2024-12-11T04:27:05,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:05,017 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0d95822809793edddcee8d4c8425775e 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-11T04:27:05,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,018 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=A 2024-12-11T04:27:05,018 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:05,019 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
0d95822809793edddcee8d4c8425775e, store=B 2024-12-11T04:27:05,019 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:05,019 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=C 2024-12-11T04:27:05,019 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:05,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,025 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,030 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412119a41e4bec25644c78c4333a94a430bb0_0d95822809793edddcee8d4c8425775e is 50, key is test_row_0/A:col10/1733891225017/Put/seqid=0 2024-12-11T04:27:05,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,057 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:05,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,058 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-11T04:27:05,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 
2024-12-11T04:27:05,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. as already flushing
2024-12-11T04:27:05,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.
2024-12-11T04:27:05,058 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60
java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-11T04:27:05,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:27:05,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60
java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-11T04:27:05,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=60
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-11T04:27:05,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-11T04:27:05,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-11T04:27:05,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:27:05,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:27:05,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:27:05,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:27:05,079 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:27:05,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891285074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:27:05,079 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:27:05,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35428 deadline: 1733891285075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:27:05,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:27:05,081 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:27:05,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891285078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:27:05,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742037_1213 (size=27248)
2024-12-11T04:27:05,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:27:05,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:27:05,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:27:05,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:27:05,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:27:05,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:27:05,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:27:05,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:27:05,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:27:05,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,184 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:05,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891285181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:05,185 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:05,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891285182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:05,187 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:05,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35428 deadline: 1733891285187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:05,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-11T04:27:05,211 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:05,211 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-11T04:27:05,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:05,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. as already flushing 2024-12-11T04:27:05,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:05,212 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:05,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:05,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:05,264 INFO [master/5f466b3719ec:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-11T04:27:05,264 INFO [master/5f466b3719ec:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-11T04:27:05,364 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:05,365 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-11T04:27:05,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:05,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. as already flushing 2024-12-11T04:27:05,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:05,365 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
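[Annotation] The pid=60 round trips above follow a fixed pattern: the master's RSProcedureDispatcher sends a FlushRegionCallable to the region server, the region reports "NOT flushing ... as already flushing", the handler fails with "Unable to complete flush", and the master logs "Remote procedure failed" and re-dispatches the procedure. For orientation only, a minimal sketch of how such a table flush is requested through the admin API; the table name is taken from the log, while the connection setup is an illustrative assumption, not part of the test code.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Asks the master to flush the table; region servers then run a
            // FlushRegionCallable per region (pid=60 in the log above). While a
            // region is already flushing, that callable fails and the master
            // re-dispatches it, which is the retry loop visible in this log.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```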
2024-12-11T04:27:05,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:05,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:05,388 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:05,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891285386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:05,388 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:05,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891285386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:05,392 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:05,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35428 deadline: 1733891285390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:05,486 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:05,492 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412119a41e4bec25644c78c4333a94a430bb0_0d95822809793edddcee8d4c8425775e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412119a41e4bec25644c78c4333a94a430bb0_0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:05,493 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/b0168a1b63744731ba362cfa27978028, store: [table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:27:05,495 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/b0168a1b63744731ba362cfa27978028 is 175, key is test_row_0/A:col10/1733891225017/Put/seqid=0 2024-12-11T04:27:05,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742036_1212 (size=83035) 2024-12-11T04:27:05,497 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=226, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/b0168a1b63744731ba362cfa27978028 2024-12-11T04:27:05,506 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/a7e45153f13d4b2cac4e212fbf3ea028 is 50, key is test_row_0/B:col10/1733891225017/Put/seqid=0 2024-12-11T04:27:05,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 
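[Annotation] The RegionTooBusyException warnings ("Over memstore limit=512.0 K") come from HRegion.checkResources blocking writes once the region's memstore passes its blocking size, i.e. the configured flush size times the block multiplier. A minimal sketch of the two settings involved, with illustrative values chosen so that 128 KB × 4 reproduces the 512 KB limit seen above; the test's actual values are not shown in this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Per-region flush threshold; the production default is 128 MB, so the
        // test cluster clearly runs with a much smaller value (illustrative here).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        // checkResources() rejects writes with RegionTooBusyException once the
        // memstore reaches flush.size * block.multiplier.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit =
            conf.getLong("hbase.hregion.memstore.flush.size", 0L)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        // 128 KB * 4 = 512 KB, matching "Over memstore limit=512.0 K" above
        // under these assumed settings.
        System.out.println("blocking memstore limit (bytes): " + blockingLimit);
    }
}
```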
2024-12-11T04:27:05,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742038_1214 (size=12151) 2024-12-11T04:27:05,513 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=226 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/a7e45153f13d4b2cac4e212fbf3ea028 2024-12-11T04:27:05,517 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:05,518 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-11T04:27:05,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:05,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. as already flushing 2024-12-11T04:27:05,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:05,518 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:05,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:05,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:27:05,523 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/bb35918bf69746b5ac84c4d529599c70 is 50, key is test_row_0/C:col10/1733891225017/Put/seqid=0 2024-12-11T04:27:05,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742039_1215 (size=12151) 2024-12-11T04:27:05,670 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:05,671 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-11T04:27:05,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:05,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. as already flushing 2024-12-11T04:27:05,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:05,671 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:05,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:05,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:05,690 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:05,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891285689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:05,692 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:05,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891285690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:05,696 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:05,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35428 deadline: 1733891285694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:05,705 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:05,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35454 deadline: 1733891285703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:05,706 DEBUG [Thread-742 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4158 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e., hostname=5f466b3719ec,39071,1733891180267, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T04:27:05,823 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:05,824 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 
{}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-11T04:27:05,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:05,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. as already flushing 2024-12-11T04:27:05,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:05,824 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:05,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:27:05,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:27:05,932 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=226 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/bb35918bf69746b5ac84c4d529599c70 2024-12-11T04:27:05,936 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/b0168a1b63744731ba362cfa27978028 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/b0168a1b63744731ba362cfa27978028 2024-12-11T04:27:05,940 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/b0168a1b63744731ba362cfa27978028, entries=450, sequenceid=226, filesize=81.1 K 2024-12-11T04:27:05,941 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/a7e45153f13d4b2cac4e212fbf3ea028 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/a7e45153f13d4b2cac4e212fbf3ea028 2024-12-11T04:27:05,953 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/a7e45153f13d4b2cac4e212fbf3ea028, entries=150, sequenceid=226, filesize=11.9 K 2024-12-11T04:27:05,954 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/bb35918bf69746b5ac84c4d529599c70 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/bb35918bf69746b5ac84c4d529599c70 2024-12-11T04:27:05,958 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/bb35918bf69746b5ac84c4d529599c70, entries=150, sequenceid=226, filesize=11.9 K 2024-12-11T04:27:05,959 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 0d95822809793edddcee8d4c8425775e in 942ms, sequenceid=226, compaction requested=true 2024-12-11T04:27:05,959 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0d95822809793edddcee8d4c8425775e: 2024-12-11T04:27:05,959 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0d95822809793edddcee8d4c8425775e:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:27:05,959 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 
compacting, 3 eligible, 16 blocking 2024-12-11T04:27:05,959 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:05,960 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0d95822809793edddcee8d4c8425775e:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:27:05,960 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:05,960 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:27:05,960 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0d95822809793edddcee8d4c8425775e:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:27:05,960 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:27:05,961 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 145762 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:27:05,961 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): 0d95822809793edddcee8d4c8425775e/A is initiating minor compaction (all files) 2024-12-11T04:27:05,961 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:27:05,961 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): 0d95822809793edddcee8d4c8425775e/B is initiating minor compaction (all files) 2024-12-11T04:27:05,961 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0d95822809793edddcee8d4c8425775e/A in TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:05,961 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0d95822809793edddcee8d4c8425775e/B in TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 
2024-12-11T04:27:05,961 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/813a4c1ccbda4899943d73499c079d96, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/66228bec15bd4b30920ecb4a7a2b3d31, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/a7e45153f13d4b2cac4e212fbf3ea028] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp, totalSize=36.0 K 2024-12-11T04:27:05,961 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/875f8896158a4b529220b5ea2da4649d, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/836e6d75a39148a68ad6b249dc731a35, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/b0168a1b63744731ba362cfa27978028] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp, totalSize=142.3 K 2024-12-11T04:27:05,961 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:05,961 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 
files: [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/875f8896158a4b529220b5ea2da4649d, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/836e6d75a39148a68ad6b249dc731a35, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/b0168a1b63744731ba362cfa27978028] 2024-12-11T04:27:05,962 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 813a4c1ccbda4899943d73499c079d96, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=187, earliestPutTs=1733891221543 2024-12-11T04:27:05,962 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 875f8896158a4b529220b5ea2da4649d, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=187, earliestPutTs=1733891221543 2024-12-11T04:27:05,962 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 66228bec15bd4b30920ecb4a7a2b3d31, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1733891222862 2024-12-11T04:27:05,963 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting a7e45153f13d4b2cac4e212fbf3ea028, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=226, earliestPutTs=1733891225009 2024-12-11T04:27:05,963 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 836e6d75a39148a68ad6b249dc731a35, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1733891222862 2024-12-11T04:27:05,964 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting b0168a1b63744731ba362cfa27978028, keycount=450, bloomtype=ROW, size=81.1 K, encoding=NONE, compression=NONE, seqNum=226, earliestPutTs=1733891225003 2024-12-11T04:27:05,977 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:05,977 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-11T04:27:05,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 
2024-12-11T04:27:05,978 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2837): Flushing 0d95822809793edddcee8d4c8425775e 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-11T04:27:05,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=A 2024-12-11T04:27:05,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:05,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=B 2024-12-11T04:27:05,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:05,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=C 2024-12-11T04:27:05,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:05,986 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0d95822809793edddcee8d4c8425775e#B#compaction#183 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:27:05,987 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/8c2f246c30a54aab99c2dbc6a232e457 is 50, key is test_row_0/B:col10/1733891225017/Put/seqid=0 2024-12-11T04:27:05,998 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:27:06,007 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412119b73485d2a7f42cdb50c3f86c8d61cdb_0d95822809793edddcee8d4c8425775e store=[table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:27:06,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-11T04:27:06,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211641a00a91cda49ac95d3036111aaef9a_0d95822809793edddcee8d4c8425775e is 50, key is test_row_0/A:col10/1733891225074/Put/seqid=0 2024-12-11T04:27:06,013 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412119b73485d2a7f42cdb50c3f86c8d61cdb_0d95822809793edddcee8d4c8425775e, store=[table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:27:06,013 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412119b73485d2a7f42cdb50c3f86c8d61cdb_0d95822809793edddcee8d4c8425775e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:27:06,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742040_1216 (size=12663) 2024-12-11T04:27:06,026 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/8c2f246c30a54aab99c2dbc6a232e457 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/8c2f246c30a54aab99c2dbc6a232e457 2024-12-11T04:27:06,035 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0d95822809793edddcee8d4c8425775e/B of 0d95822809793edddcee8d4c8425775e into 8c2f246c30a54aab99c2dbc6a232e457(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T04:27:06,036 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0d95822809793edddcee8d4c8425775e: 2024-12-11T04:27:06,036 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e., storeName=0d95822809793edddcee8d4c8425775e/B, priority=13, startTime=1733891225959; duration=0sec 2024-12-11T04:27:06,036 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:27:06,036 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0d95822809793edddcee8d4c8425775e:B 2024-12-11T04:27:06,036 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:27:06,038 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:27:06,038 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): 0d95822809793edddcee8d4c8425775e/C is initiating minor compaction (all files) 2024-12-11T04:27:06,039 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0d95822809793edddcee8d4c8425775e/C in TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:06,039 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/56279eb499d6407b86cb4059082d6634, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/a83cddd1dbb4413a9f6614e490b82972, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/bb35918bf69746b5ac84c4d529599c70] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp, totalSize=36.0 K 2024-12-11T04:27:06,039 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 56279eb499d6407b86cb4059082d6634, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=187, earliestPutTs=1733891221543 2024-12-11T04:27:06,039 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting a83cddd1dbb4413a9f6614e490b82972, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1733891222862 2024-12-11T04:27:06,040 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting bb35918bf69746b5ac84c4d529599c70, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=226, earliestPutTs=1733891225009 2024-12-11T04:27:06,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 
is added to blk_1073742042_1218 (size=4469) 2024-12-11T04:27:06,055 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0d95822809793edddcee8d4c8425775e#A#compaction#184 average throughput is 0.43 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:27:06,056 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/724d2836b5634efbbb5b25ad2b1bc1a6 is 175, key is test_row_0/A:col10/1733891225017/Put/seqid=0 2024-12-11T04:27:06,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742041_1217 (size=12304) 2024-12-11T04:27:06,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:06,068 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211641a00a91cda49ac95d3036111aaef9a_0d95822809793edddcee8d4c8425775e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211641a00a91cda49ac95d3036111aaef9a_0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:06,069 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0d95822809793edddcee8d4c8425775e#C#compaction#186 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:27:06,069 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/6b3d083986f1431f91d1ca3278fbd6d1 is 50, key is test_row_0/C:col10/1733891225017/Put/seqid=0 2024-12-11T04:27:06,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/648251a83c164e13a6cd4f05b7a5ac29, store: [table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:27:06,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/648251a83c164e13a6cd4f05b7a5ac29 is 175, key is test_row_0/A:col10/1733891225074/Put/seqid=0 2024-12-11T04:27:06,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742044_1220 (size=31105) 2024-12-11T04:27:06,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742045_1221 (size=12663) 2024-12-11T04:27:06,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742043_1219 (size=31617) 2024-12-11T04:27:06,102 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/724d2836b5634efbbb5b25ad2b1bc1a6 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/724d2836b5634efbbb5b25ad2b1bc1a6 2024-12-11T04:27:06,108 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0d95822809793edddcee8d4c8425775e/A of 0d95822809793edddcee8d4c8425775e into 724d2836b5634efbbb5b25ad2b1bc1a6(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T04:27:06,108 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0d95822809793edddcee8d4c8425775e: 2024-12-11T04:27:06,108 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e., storeName=0d95822809793edddcee8d4c8425775e/A, priority=13, startTime=1733891225959; duration=0sec 2024-12-11T04:27:06,108 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:06,108 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0d95822809793edddcee8d4c8425775e:A 2024-12-11T04:27:06,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:06,196 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. as already flushing 2024-12-11T04:27:06,209 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:06,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35428 deadline: 1733891286205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:06,210 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:06,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891286205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:06,210 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:06,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891286207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:06,312 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:06,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35428 deadline: 1733891286311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:06,312 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:06,312 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:06,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891286311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:06,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891286311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:06,494 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=250, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/648251a83c164e13a6cd4f05b7a5ac29 2024-12-11T04:27:06,504 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/6b3d083986f1431f91d1ca3278fbd6d1 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/6b3d083986f1431f91d1ca3278fbd6d1 2024-12-11T04:27:06,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/f21e76840718427da4a38ae87e2e871e is 50, key is test_row_0/B:col10/1733891225074/Put/seqid=0 2024-12-11T04:27:06,509 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0d95822809793edddcee8d4c8425775e/C of 0d95822809793edddcee8d4c8425775e into 6b3d083986f1431f91d1ca3278fbd6d1(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T04:27:06,509 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0d95822809793edddcee8d4c8425775e: 2024-12-11T04:27:06,509 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e., storeName=0d95822809793edddcee8d4c8425775e/C, priority=13, startTime=1733891225960; duration=0sec 2024-12-11T04:27:06,510 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:06,510 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0d95822809793edddcee8d4c8425775e:C 2024-12-11T04:27:06,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742046_1222 (size=12151) 2024-12-11T04:27:06,514 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:06,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891286513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:06,514 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:06,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891286513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:06,516 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:06,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35428 deadline: 1733891286514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:06,817 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:06,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891286815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:06,818 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:06,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891286815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:06,819 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:06,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35428 deadline: 1733891286818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:06,917 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/f21e76840718427da4a38ae87e2e871e 2024-12-11T04:27:06,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/00fc3950340a46b88172189f58ef9940 is 50, key is test_row_0/C:col10/1733891225074/Put/seqid=0 2024-12-11T04:27:06,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742047_1223 (size=12151) 2024-12-11T04:27:07,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-11T04:27:07,322 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:07,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891287321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:07,324 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:07,324 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:07,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35428 deadline: 1733891287322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:07,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891287323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:07,335 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/00fc3950340a46b88172189f58ef9940 2024-12-11T04:27:07,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/648251a83c164e13a6cd4f05b7a5ac29 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/648251a83c164e13a6cd4f05b7a5ac29 2024-12-11T04:27:07,348 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/648251a83c164e13a6cd4f05b7a5ac29, entries=150, sequenceid=250, filesize=30.4 K 2024-12-11T04:27:07,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/f21e76840718427da4a38ae87e2e871e as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/f21e76840718427da4a38ae87e2e871e 2024-12-11T04:27:07,353 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/f21e76840718427da4a38ae87e2e871e, entries=150, sequenceid=250, filesize=11.9 K 2024-12-11T04:27:07,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/00fc3950340a46b88172189f58ef9940 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/00fc3950340a46b88172189f58ef9940 2024-12-11T04:27:07,361 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/00fc3950340a46b88172189f58ef9940, entries=150, sequenceid=250, filesize=11.9 K 2024-12-11T04:27:07,362 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 0d95822809793edddcee8d4c8425775e in 1384ms, sequenceid=250, compaction requested=false 2024-12-11T04:27:07,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2538): Flush status journal for 0d95822809793edddcee8d4c8425775e: 2024-12-11T04:27:07,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 
2024-12-11T04:27:07,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=60 2024-12-11T04:27:07,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=60 2024-12-11T04:27:07,369 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59 2024-12-11T04:27:07,369 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4620 sec 2024-12-11T04:27:07,370 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees in 2.4660 sec 2024-12-11T04:27:07,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:07,937 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0d95822809793edddcee8d4c8425775e 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-11T04:27:07,937 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=A 2024-12-11T04:27:07,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:07,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=B 2024-12-11T04:27:07,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:07,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=C 2024-12-11T04:27:07,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:07,945 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412117ae0b9d9a0354386ab8aa73409d3d1ff_0d95822809793edddcee8d4c8425775e is 50, key is test_row_0/A:col10/1733891227936/Put/seqid=0 2024-12-11T04:27:07,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742048_1224 (size=12454) 2024-12-11T04:27:07,966 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:07,971 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412117ae0b9d9a0354386ab8aa73409d3d1ff_0d95822809793edddcee8d4c8425775e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412117ae0b9d9a0354386ab8aa73409d3d1ff_0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:07,973 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/d911735e4ceb4cb5ab147c5463c02ce2, store: [table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:27:07,974 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/d911735e4ceb4cb5ab147c5463c02ce2 is 175, key is test_row_0/A:col10/1733891227936/Put/seqid=0 2024-12-11T04:27:08,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742049_1225 (size=31255) 2024-12-11T04:27:08,013 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:08,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35444 deadline: 1733891288010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:08,114 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:08,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35444 deadline: 1733891288114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:08,318 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:08,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35444 deadline: 1733891288315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:08,325 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:08,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891288323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:08,327 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:08,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35428 deadline: 1733891288326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:08,327 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:08,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891288327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:08,402 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=266, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/d911735e4ceb4cb5ab147c5463c02ce2 2024-12-11T04:27:08,411 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/06636c96ed6e4667be70c50530d9bb1b is 50, key is test_row_0/B:col10/1733891227936/Put/seqid=0 2024-12-11T04:27:08,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742050_1226 (size=12301) 2024-12-11T04:27:08,415 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=266 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/06636c96ed6e4667be70c50530d9bb1b 2024-12-11T04:27:08,423 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/faa076d95f1b49ceb02adc9c7ee44c6f is 50, key is test_row_0/C:col10/1733891227936/Put/seqid=0 2024-12-11T04:27:08,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742051_1227 (size=12301) 2024-12-11T04:27:08,621 WARN 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:08,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35444 deadline: 1733891288621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:08,828 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=266 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/faa076d95f1b49ceb02adc9c7ee44c6f 2024-12-11T04:27:08,833 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/d911735e4ceb4cb5ab147c5463c02ce2 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/d911735e4ceb4cb5ab147c5463c02ce2 2024-12-11T04:27:08,838 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/d911735e4ceb4cb5ab147c5463c02ce2, entries=150, sequenceid=266, filesize=30.5 K 2024-12-11T04:27:08,839 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/06636c96ed6e4667be70c50530d9bb1b as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/06636c96ed6e4667be70c50530d9bb1b 2024-12-11T04:27:08,843 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/06636c96ed6e4667be70c50530d9bb1b, entries=150, sequenceid=266, filesize=12.0 K 2024-12-11T04:27:08,844 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/faa076d95f1b49ceb02adc9c7ee44c6f as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/faa076d95f1b49ceb02adc9c7ee44c6f 2024-12-11T04:27:08,848 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/faa076d95f1b49ceb02adc9c7ee44c6f, entries=150, sequenceid=266, filesize=12.0 K 2024-12-11T04:27:08,849 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 0d95822809793edddcee8d4c8425775e in 912ms, sequenceid=266, compaction requested=true 2024-12-11T04:27:08,849 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0d95822809793edddcee8d4c8425775e: 2024-12-11T04:27:08,850 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:27:08,850 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0d95822809793edddcee8d4c8425775e:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:27:08,850 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:08,850 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:27:08,851 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93977 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:27:08,851 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0d95822809793edddcee8d4c8425775e:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:27:08,851 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:08,851 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): 0d95822809793edddcee8d4c8425775e/A is initiating minor compaction (all files) 2024-12-11T04:27:08,851 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0d95822809793edddcee8d4c8425775e:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:27:08,851 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:27:08,851 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0d95822809793edddcee8d4c8425775e/A in TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:08,851 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/724d2836b5634efbbb5b25ad2b1bc1a6, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/648251a83c164e13a6cd4f05b7a5ac29, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/d911735e4ceb4cb5ab147c5463c02ce2] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp, totalSize=91.8 K 2024-12-11T04:27:08,851 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:08,851 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. files: [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/724d2836b5634efbbb5b25ad2b1bc1a6, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/648251a83c164e13a6cd4f05b7a5ac29, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/d911735e4ceb4cb5ab147c5463c02ce2] 2024-12-11T04:27:08,852 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 724d2836b5634efbbb5b25ad2b1bc1a6, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=226, earliestPutTs=1733891225009 2024-12-11T04:27:08,852 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 648251a83c164e13a6cd4f05b7a5ac29, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1733891225061 2024-12-11T04:27:08,852 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37115 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:27:08,852 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): 0d95822809793edddcee8d4c8425775e/B is initiating minor compaction (all files) 2024-12-11T04:27:08,852 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0d95822809793edddcee8d4c8425775e/B in TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 
2024-12-11T04:27:08,853 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/8c2f246c30a54aab99c2dbc6a232e457, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/f21e76840718427da4a38ae87e2e871e, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/06636c96ed6e4667be70c50530d9bb1b] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp, totalSize=36.2 K 2024-12-11T04:27:08,853 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting d911735e4ceb4cb5ab147c5463c02ce2, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=266, earliestPutTs=1733891226201 2024-12-11T04:27:08,853 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 8c2f246c30a54aab99c2dbc6a232e457, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=226, earliestPutTs=1733891225009 2024-12-11T04:27:08,854 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting f21e76840718427da4a38ae87e2e871e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1733891225061 2024-12-11T04:27:08,854 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 06636c96ed6e4667be70c50530d9bb1b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=266, earliestPutTs=1733891226201 2024-12-11T04:27:08,862 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:27:08,864 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0d95822809793edddcee8d4c8425775e#B#compaction#193 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:27:08,865 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/5f01572e7fbc4f5facb0262e04054a7b is 50, key is test_row_0/B:col10/1733891227936/Put/seqid=0 2024-12-11T04:27:08,867 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241211e75649bf61cc4c2182e0f5b8cd896091_0d95822809793edddcee8d4c8425775e store=[table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:27:08,870 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241211e75649bf61cc4c2182e0f5b8cd896091_0d95822809793edddcee8d4c8425775e, store=[table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:27:08,870 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211e75649bf61cc4c2182e0f5b8cd896091_0d95822809793edddcee8d4c8425775e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:27:08,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742052_1228 (size=12915) 2024-12-11T04:27:08,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742053_1229 (size=4469) 2024-12-11T04:27:08,892 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/5f01572e7fbc4f5facb0262e04054a7b as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/5f01572e7fbc4f5facb0262e04054a7b 2024-12-11T04:27:08,893 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0d95822809793edddcee8d4c8425775e#A#compaction#192 average throughput is 0.79 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:27:08,894 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/c36198d8fdfc408ab9b03f82ce94cb8b is 175, key is test_row_0/A:col10/1733891227936/Put/seqid=0 2024-12-11T04:27:08,900 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0d95822809793edddcee8d4c8425775e/B of 0d95822809793edddcee8d4c8425775e into 5f01572e7fbc4f5facb0262e04054a7b(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T04:27:08,900 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0d95822809793edddcee8d4c8425775e: 2024-12-11T04:27:08,900 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e., storeName=0d95822809793edddcee8d4c8425775e/B, priority=13, startTime=1733891228850; duration=0sec 2024-12-11T04:27:08,900 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:27:08,900 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0d95822809793edddcee8d4c8425775e:B 2024-12-11T04:27:08,900 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:27:08,902 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37115 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:27:08,902 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): 0d95822809793edddcee8d4c8425775e/C is initiating minor compaction (all files) 2024-12-11T04:27:08,902 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0d95822809793edddcee8d4c8425775e/C in TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:08,902 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/6b3d083986f1431f91d1ca3278fbd6d1, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/00fc3950340a46b88172189f58ef9940, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/faa076d95f1b49ceb02adc9c7ee44c6f] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp, totalSize=36.2 K 2024-12-11T04:27:08,903 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 6b3d083986f1431f91d1ca3278fbd6d1, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=226, earliestPutTs=1733891225009 2024-12-11T04:27:08,904 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 00fc3950340a46b88172189f58ef9940, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1733891225061 2024-12-11T04:27:08,904 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting faa076d95f1b49ceb02adc9c7ee44c6f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=266, earliestPutTs=1733891226201 2024-12-11T04:27:08,915 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
0d95822809793edddcee8d4c8425775e#C#compaction#194 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:27:08,916 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/604a413f81364431af096438060cf63f is 50, key is test_row_0/C:col10/1733891227936/Put/seqid=0 2024-12-11T04:27:08,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742054_1230 (size=31869) 2024-12-11T04:27:08,926 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/c36198d8fdfc408ab9b03f82ce94cb8b as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/c36198d8fdfc408ab9b03f82ce94cb8b 2024-12-11T04:27:08,933 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0d95822809793edddcee8d4c8425775e/A of 0d95822809793edddcee8d4c8425775e into c36198d8fdfc408ab9b03f82ce94cb8b(size=31.1 K), total size for store is 31.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:27:08,933 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0d95822809793edddcee8d4c8425775e: 2024-12-11T04:27:08,933 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e., storeName=0d95822809793edddcee8d4c8425775e/A, priority=13, startTime=1733891228849; duration=0sec 2024-12-11T04:27:08,933 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:08,933 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0d95822809793edddcee8d4c8425775e:A 2024-12-11T04:27:08,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742055_1231 (size=12915) 2024-12-11T04:27:08,944 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/604a413f81364431af096438060cf63f as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/604a413f81364431af096438060cf63f 2024-12-11T04:27:08,950 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0d95822809793edddcee8d4c8425775e/C of 0d95822809793edddcee8d4c8425775e into 604a413f81364431af096438060cf63f(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T04:27:08,950 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0d95822809793edddcee8d4c8425775e: 2024-12-11T04:27:08,950 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e., storeName=0d95822809793edddcee8d4c8425775e/C, priority=13, startTime=1733891228851; duration=0sec 2024-12-11T04:27:08,950 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:08,950 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0d95822809793edddcee8d4c8425775e:C 2024-12-11T04:27:09,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-11T04:27:09,013 INFO [Thread-752 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 59 completed 2024-12-11T04:27:09,015 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:27:09,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees 2024-12-11T04:27:09,017 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:27:09,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-11T04:27:09,018 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:27:09,018 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=62, ppid=61, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:27:09,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-11T04:27:09,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:09,127 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0d95822809793edddcee8d4c8425775e 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-11T04:27:09,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=A 2024-12-11T04:27:09,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:09,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=B 
2024-12-11T04:27:09,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:09,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=C 2024-12-11T04:27:09,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:09,137 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412110f623bf96455498aaae329355da2a3ae_0d95822809793edddcee8d4c8425775e is 50, key is test_row_0/A:col10/1733891229125/Put/seqid=0 2024-12-11T04:27:09,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742056_1232 (size=14994) 2024-12-11T04:27:09,148 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:09,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35444 deadline: 1733891289147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:09,169 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:09,170 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-11T04:27:09,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 
2024-12-11T04:27:09,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. as already flushing 2024-12-11T04:27:09,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:09,170 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:09,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:27:09,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:09,251 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:09,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35444 deadline: 1733891289249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:09,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-11T04:27:09,322 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:09,322 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-11T04:27:09,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:09,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. as already flushing 2024-12-11T04:27:09,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:09,323 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:27:09,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:09,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:09,454 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:09,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35444 deadline: 1733891289453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:09,475 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:09,476 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-11T04:27:09,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:09,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. as already flushing 2024-12-11T04:27:09,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 
2024-12-11T04:27:09,476 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:09,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:09,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:09,542 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:09,547 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412110f623bf96455498aaae329355da2a3ae_0d95822809793edddcee8d4c8425775e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412110f623bf96455498aaae329355da2a3ae_0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:09,548 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/9a263894cbad4b48a1d1ffeb55e97a73, store: [table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:27:09,549 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/9a263894cbad4b48a1d1ffeb55e97a73 is 175, key is test_row_0/A:col10/1733891229125/Put/seqid=0 2024-12-11T04:27:09,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742057_1233 (size=39949) 2024-12-11T04:27:09,557 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=293, memsize=47.0 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/9a263894cbad4b48a1d1ffeb55e97a73 2024-12-11T04:27:09,564 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/0e6a0dead41146c3bcf2900b765a7fbe is 50, key is test_row_0/B:col10/1733891229125/Put/seqid=0 2024-12-11T04:27:09,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742058_1234 (size=12301) 2024-12-11T04:27:09,568 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/0e6a0dead41146c3bcf2900b765a7fbe 2024-12-11T04:27:09,578 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/06179e1773bb4701b0c4209a833466aa is 50, key is test_row_0/C:col10/1733891229125/Put/seqid=0 2024-12-11T04:27:09,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742059_1235 (size=12301) 2024-12-11T04:27:09,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-11T04:27:09,628 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:09,629 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-11T04:27:09,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:09,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. as already flushing 2024-12-11T04:27:09,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:09,629 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:09,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:09,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:09,743 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:09,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35454 deadline: 1733891289742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:09,744 DEBUG [Thread-742 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8196 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e., hostname=5f466b3719ec,39071,1733891180267, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T04:27:09,756 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:09,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35444 deadline: 1733891289756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:09,781 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:09,782 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-11T04:27:09,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:09,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. as already flushing 2024-12-11T04:27:09,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:09,782 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:09,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:09,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:09,935 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:09,935 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-11T04:27:09,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:09,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. as already flushing 2024-12-11T04:27:09,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:09,936 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:09,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:09,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:27:09,983 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/06179e1773bb4701b0c4209a833466aa 2024-12-11T04:27:09,992 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/9a263894cbad4b48a1d1ffeb55e97a73 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/9a263894cbad4b48a1d1ffeb55e97a73 2024-12-11T04:27:09,998 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/9a263894cbad4b48a1d1ffeb55e97a73, entries=200, sequenceid=293, filesize=39.0 K 2024-12-11T04:27:09,999 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/0e6a0dead41146c3bcf2900b765a7fbe as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/0e6a0dead41146c3bcf2900b765a7fbe 2024-12-11T04:27:10,003 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/0e6a0dead41146c3bcf2900b765a7fbe, entries=150, sequenceid=293, filesize=12.0 K 2024-12-11T04:27:10,004 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/06179e1773bb4701b0c4209a833466aa as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/06179e1773bb4701b0c4209a833466aa 2024-12-11T04:27:10,008 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/06179e1773bb4701b0c4209a833466aa, entries=150, sequenceid=293, filesize=12.0 K 2024-12-11T04:27:10,009 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 0d95822809793edddcee8d4c8425775e in 883ms, sequenceid=293, compaction requested=false 2024-12-11T04:27:10,009 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0d95822809793edddcee8d4c8425775e: 2024-12-11T04:27:10,088 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:10,088 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=62 2024-12-11T04:27:10,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:10,089 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2837): Flushing 0d95822809793edddcee8d4c8425775e 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-11T04:27:10,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=A 2024-12-11T04:27:10,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:10,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=B 2024-12-11T04:27:10,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:10,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=C 2024-12-11T04:27:10,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:10,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121176549f4ea16c49168b3f09f6ace43bfd_0d95822809793edddcee8d4c8425775e is 50, key is test_row_0/A:col10/1733891229147/Put/seqid=0 2024-12-11T04:27:10,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742060_1236 (size=12454) 2024-12-11T04:27:10,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:10,109 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121176549f4ea16c49168b3f09f6ace43bfd_0d95822809793edddcee8d4c8425775e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121176549f4ea16c49168b3f09f6ace43bfd_0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:10,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/e9f8ea4004d94d9bbc62fe742eb22c37, store: [table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:27:10,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/e9f8ea4004d94d9bbc62fe742eb22c37 is 175, key is test_row_0/A:col10/1733891229147/Put/seqid=0 2024-12-11T04:27:10,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742061_1237 (size=31255) 2024-12-11T04:27:10,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-11T04:27:10,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:10,263 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. as already flushing 2024-12-11T04:27:10,311 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:10,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35444 deadline: 1733891290309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:10,337 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:10,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891290337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:10,338 DEBUG [Thread-750 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4131 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e., hostname=5f466b3719ec,39071,1733891180267, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T04:27:10,343 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:10,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891290342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:10,344 DEBUG [Thread-744 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4138 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e., hostname=5f466b3719ec,39071,1733891180267, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T04:27:10,346 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:10,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35428 deadline: 1733891290344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:10,347 DEBUG [Thread-746 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4142 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at 
region=TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e., hostname=5f466b3719ec,39071,1733891180267, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T04:27:10,415 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:10,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35444 deadline: 1733891290412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:10,517 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=305, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/e9f8ea4004d94d9bbc62fe742eb22c37 2024-12-11T04:27:10,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/0ef39148691145be9da6f25ba8cf1f50 is 50, key is test_row_0/B:col10/1733891229147/Put/seqid=0 2024-12-11T04:27:10,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:45413 is added to blk_1073742062_1238 (size=12301) 2024-12-11T04:27:10,619 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:10,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35444 deadline: 1733891290617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:10,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:10,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35444 deadline: 1733891290920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:10,930 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=305 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/0ef39148691145be9da6f25ba8cf1f50 2024-12-11T04:27:10,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/d54c45f05e4d4802882ea6e45609b9a3 is 50, key is test_row_0/C:col10/1733891229147/Put/seqid=0 2024-12-11T04:27:10,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742063_1239 (size=12301) 2024-12-11T04:27:11,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-11T04:27:11,342 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=305 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/d54c45f05e4d4802882ea6e45609b9a3 2024-12-11T04:27:11,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/e9f8ea4004d94d9bbc62fe742eb22c37 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/e9f8ea4004d94d9bbc62fe742eb22c37 2024-12-11T04:27:11,352 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/e9f8ea4004d94d9bbc62fe742eb22c37, entries=150, sequenceid=305, filesize=30.5 K 2024-12-11T04:27:11,353 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/0ef39148691145be9da6f25ba8cf1f50 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/0ef39148691145be9da6f25ba8cf1f50 2024-12-11T04:27:11,358 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/0ef39148691145be9da6f25ba8cf1f50, entries=150, sequenceid=305, filesize=12.0 K 2024-12-11T04:27:11,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/d54c45f05e4d4802882ea6e45609b9a3 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/d54c45f05e4d4802882ea6e45609b9a3 2024-12-11T04:27:11,363 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/d54c45f05e4d4802882ea6e45609b9a3, entries=150, sequenceid=305, filesize=12.0 K 2024-12-11T04:27:11,363 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 0d95822809793edddcee8d4c8425775e in 1274ms, sequenceid=305, compaction requested=true 2024-12-11T04:27:11,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2538): Flush status journal for 0d95822809793edddcee8d4c8425775e: 2024-12-11T04:27:11,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 
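
The repeated RegionTooBusyException warnings in this stretch of the log come from HRegion.checkResources blocking writes while the region's memstore is over its blocking limit (512.0 K in this run; that limit is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, so the small value points to a deliberately small flush size in the test configuration). On the client side, the RpcRetryingCallerImpl "Call exception, tries=6, retries=16" entries show the AcidGuaranteesTestTool writer threads retrying HTable.put until the flush above frees memstore space. The following minimal sketch is illustrative only: it assumes a reachable cluster and uses example property values that are not taken from this run, and it shows how an ordinary client put interacts with that retry policy.

// Minimal sketch, assuming a reachable cluster with a table named
// "TestAcidGuarantees"; the retry/pause values below are illustrative,
// not the ones used by this particular test run.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class TooBusyRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Client-side retry policy: RegionTooBusyException is retried by the
    // RPC retrying caller (the "tries=6, retries=16" lines above) until the
    // write succeeds or the retry budget is exhausted.
    conf.setInt("hbase.client.retries.number", 16); // illustrative value
    conf.setLong("hbase.client.pause", 100);        // ms between retries, illustrative
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // Blocks through the retries while the region memstore is over its limit.
      table.put(put);
    }
  }
}

The put call simply blocks through the retries; RegionTooBusyException only reaches the caller if the retry budget runs out, which is consistent with the writer threads above resuming progress once the flush at sequenceid=305 completes and memstore pressure drops.
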
2024-12-11T04:27:11,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=62 2024-12-11T04:27:11,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=62 2024-12-11T04:27:11,366 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=62, resume processing ppid=61 2024-12-11T04:27:11,366 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, ppid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3470 sec 2024-12-11T04:27:11,367 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees in 2.3510 sec 2024-12-11T04:27:11,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:11,425 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0d95822809793edddcee8d4c8425775e 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-11T04:27:11,425 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=A 2024-12-11T04:27:11,425 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:11,425 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=B 2024-12-11T04:27:11,425 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:11,425 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=C 2024-12-11T04:27:11,425 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:11,433 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412117551996d473f4837b99e5c518ef12157_0d95822809793edddcee8d4c8425775e is 50, key is test_row_0/A:col10/1733891230303/Put/seqid=0 2024-12-11T04:27:11,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742064_1240 (size=14994) 2024-12-11T04:27:11,441 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:11,448 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412117551996d473f4837b99e5c518ef12157_0d95822809793edddcee8d4c8425775e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412117551996d473f4837b99e5c518ef12157_0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:11,448 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/bc6d3147a64642288ff6640e6d761b8d, store: [table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:27:11,449 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/bc6d3147a64642288ff6640e6d761b8d is 175, key is test_row_0/A:col10/1733891230303/Put/seqid=0 2024-12-11T04:27:11,449 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:11,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35444 deadline: 1733891291447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:11,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742065_1241 (size=39949) 2024-12-11T04:27:11,455 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=330, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/bc6d3147a64642288ff6640e6d761b8d 2024-12-11T04:27:11,462 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/ccd1de5a62e3405292c72712c695e93b is 50, key is test_row_0/B:col10/1733891230303/Put/seqid=0 2024-12-11T04:27:11,469 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742066_1242 (size=12301) 2024-12-11T04:27:11,552 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:11,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35444 deadline: 1733891291550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:11,755 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:11,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35444 deadline: 1733891291755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:11,870 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/ccd1de5a62e3405292c72712c695e93b 2024-12-11T04:27:11,879 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/600ca7ecdef7428d836248b8326befb1 is 50, key is test_row_0/C:col10/1733891230303/Put/seqid=0 2024-12-11T04:27:11,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742067_1243 (size=12301) 2024-12-11T04:27:12,061 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:12,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35444 deadline: 1733891292058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:12,284 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/600ca7ecdef7428d836248b8326befb1 2024-12-11T04:27:12,289 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/bc6d3147a64642288ff6640e6d761b8d as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/bc6d3147a64642288ff6640e6d761b8d 2024-12-11T04:27:12,295 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/bc6d3147a64642288ff6640e6d761b8d, entries=200, sequenceid=330, filesize=39.0 K 2024-12-11T04:27:12,296 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/ccd1de5a62e3405292c72712c695e93b as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/ccd1de5a62e3405292c72712c695e93b 2024-12-11T04:27:12,301 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/ccd1de5a62e3405292c72712c695e93b, entries=150, sequenceid=330, filesize=12.0 K 2024-12-11T04:27:12,302 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/600ca7ecdef7428d836248b8326befb1 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/600ca7ecdef7428d836248b8326befb1 2024-12-11T04:27:12,306 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/600ca7ecdef7428d836248b8326befb1, entries=150, sequenceid=330, filesize=12.0 K 2024-12-11T04:27:12,307 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 0d95822809793edddcee8d4c8425775e in 882ms, sequenceid=330, compaction requested=true 2024-12-11T04:27:12,307 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0d95822809793edddcee8d4c8425775e: 2024-12-11T04:27:12,307 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T04:27:12,308 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0d95822809793edddcee8d4c8425775e:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:27:12,308 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:12,308 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T04:27:12,308 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0d95822809793edddcee8d4c8425775e:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:27:12,309 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:12,309 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0d95822809793edddcee8d4c8425775e:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:27:12,309 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:27:12,310 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 143022 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T04:27:12,310 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): 0d95822809793edddcee8d4c8425775e/A is initiating minor compaction (all files) 2024-12-11T04:27:12,310 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0d95822809793edddcee8d4c8425775e/A in TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 
2024-12-11T04:27:12,310 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/c36198d8fdfc408ab9b03f82ce94cb8b, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/9a263894cbad4b48a1d1ffeb55e97a73, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/e9f8ea4004d94d9bbc62fe742eb22c37, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/bc6d3147a64642288ff6640e6d761b8d] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp, totalSize=139.7 K 2024-12-11T04:27:12,310 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:12,310 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. files: [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/c36198d8fdfc408ab9b03f82ce94cb8b, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/9a263894cbad4b48a1d1ffeb55e97a73, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/e9f8ea4004d94d9bbc62fe742eb22c37, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/bc6d3147a64642288ff6640e6d761b8d] 2024-12-11T04:27:12,311 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49818 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T04:27:12,311 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting c36198d8fdfc408ab9b03f82ce94cb8b, keycount=150, bloomtype=ROW, size=31.1 K, encoding=NONE, compression=NONE, seqNum=266, earliestPutTs=1733891226201 2024-12-11T04:27:12,311 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): 0d95822809793edddcee8d4c8425775e/B is initiating minor compaction (all files) 2024-12-11T04:27:12,311 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0d95822809793edddcee8d4c8425775e/B in TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 
2024-12-11T04:27:12,311 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/5f01572e7fbc4f5facb0262e04054a7b, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/0e6a0dead41146c3bcf2900b765a7fbe, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/0ef39148691145be9da6f25ba8cf1f50, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/ccd1de5a62e3405292c72712c695e93b] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp, totalSize=48.7 K 2024-12-11T04:27:12,311 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9a263894cbad4b48a1d1ffeb55e97a73, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1733891227986 2024-12-11T04:27:12,311 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 5f01572e7fbc4f5facb0262e04054a7b, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=266, earliestPutTs=1733891226201 2024-12-11T04:27:12,312 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting e9f8ea4004d94d9bbc62fe742eb22c37, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=305, earliestPutTs=1733891229133 2024-12-11T04:27:12,312 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 0e6a0dead41146c3bcf2900b765a7fbe, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1733891227986 2024-12-11T04:27:12,312 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 0ef39148691145be9da6f25ba8cf1f50, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=305, earliestPutTs=1733891229133 2024-12-11T04:27:12,313 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting bc6d3147a64642288ff6640e6d761b8d, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1733891230301 2024-12-11T04:27:12,313 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting ccd1de5a62e3405292c72712c695e93b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1733891230303 2024-12-11T04:27:12,326 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0d95822809793edddcee8d4c8425775e#B#compaction#204 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:27:12,326 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/ba6805ab609442f5b1977d80a689809e is 50, key is test_row_0/B:col10/1733891230303/Put/seqid=0 2024-12-11T04:27:12,328 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:27:12,331 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121147a3fcaad9554c87ab6e622fc893a409_0d95822809793edddcee8d4c8425775e store=[table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:27:12,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742068_1244 (size=13051) 2024-12-11T04:27:12,334 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121147a3fcaad9554c87ab6e622fc893a409_0d95822809793edddcee8d4c8425775e, store=[table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:27:12,334 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121147a3fcaad9554c87ab6e622fc893a409_0d95822809793edddcee8d4c8425775e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:27:12,338 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/ba6805ab609442f5b1977d80a689809e as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/ba6805ab609442f5b1977d80a689809e 2024-12-11T04:27:12,342 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0d95822809793edddcee8d4c8425775e/B of 0d95822809793edddcee8d4c8425775e into ba6805ab609442f5b1977d80a689809e(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T04:27:12,342 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0d95822809793edddcee8d4c8425775e: 2024-12-11T04:27:12,342 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e., storeName=0d95822809793edddcee8d4c8425775e/B, priority=12, startTime=1733891232308; duration=0sec 2024-12-11T04:27:12,343 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:27:12,343 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0d95822809793edddcee8d4c8425775e:B 2024-12-11T04:27:12,343 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T04:27:12,344 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49818 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T04:27:12,344 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): 0d95822809793edddcee8d4c8425775e/C is initiating minor compaction (all files) 2024-12-11T04:27:12,344 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0d95822809793edddcee8d4c8425775e/C in TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:12,344 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/604a413f81364431af096438060cf63f, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/06179e1773bb4701b0c4209a833466aa, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/d54c45f05e4d4802882ea6e45609b9a3, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/600ca7ecdef7428d836248b8326befb1] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp, totalSize=48.7 K 2024-12-11T04:27:12,345 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 604a413f81364431af096438060cf63f, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=266, earliestPutTs=1733891226201 2024-12-11T04:27:12,345 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 06179e1773bb4701b0c4209a833466aa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1733891227986 2024-12-11T04:27:12,346 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting d54c45f05e4d4802882ea6e45609b9a3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=305, earliestPutTs=1733891229133 2024-12-11T04:27:12,346 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 600ca7ecdef7428d836248b8326befb1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1733891230303 2024-12-11T04:27:12,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742069_1245 (size=4469) 2024-12-11T04:27:12,348 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0d95822809793edddcee8d4c8425775e#A#compaction#205 average throughput is 1.22 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:27:12,349 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/158bb97e6807422eb0fb47232dfada33 is 175, key is test_row_0/A:col10/1733891230303/Put/seqid=0 2024-12-11T04:27:12,375 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0d95822809793edddcee8d4c8425775e#C#compaction#206 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:27:12,376 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/83024c6eb95141f5ae927034bb6b297c is 50, key is test_row_0/C:col10/1733891230303/Put/seqid=0 2024-12-11T04:27:12,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742070_1246 (size=32005) 2024-12-11T04:27:12,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742071_1247 (size=13051) 2024-12-11T04:27:12,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:12,566 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0d95822809793edddcee8d4c8425775e 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-11T04:27:12,566 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=A 2024-12-11T04:27:12,566 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:12,566 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=B 2024-12-11T04:27:12,566 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:12,566 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=C 2024-12-11T04:27:12,567 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:12,590 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412115e9d0d32eb8d4e53b85ac7a64a92474d_0d95822809793edddcee8d4c8425775e is 50, key is test_row_0/A:col10/1733891232565/Put/seqid=0 2024-12-11T04:27:12,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742072_1248 (size=12454) 2024-12-11T04:27:12,623 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:12,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35444 deadline: 1733891292620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:12,725 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:12,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35444 deadline: 1733891292724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:12,788 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/158bb97e6807422eb0fb47232dfada33 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/158bb97e6807422eb0fb47232dfada33 2024-12-11T04:27:12,796 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/83024c6eb95141f5ae927034bb6b297c as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/83024c6eb95141f5ae927034bb6b297c 2024-12-11T04:27:12,797 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0d95822809793edddcee8d4c8425775e/A of 0d95822809793edddcee8d4c8425775e into 158bb97e6807422eb0fb47232dfada33(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:27:12,797 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0d95822809793edddcee8d4c8425775e: 2024-12-11T04:27:12,797 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e., storeName=0d95822809793edddcee8d4c8425775e/A, priority=12, startTime=1733891232307; duration=0sec 2024-12-11T04:27:12,797 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:12,797 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0d95822809793edddcee8d4c8425775e:A 2024-12-11T04:27:12,802 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0d95822809793edddcee8d4c8425775e/C of 0d95822809793edddcee8d4c8425775e into 83024c6eb95141f5ae927034bb6b297c(size=12.7 K), total size for store is 12.7 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:27:12,802 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0d95822809793edddcee8d4c8425775e: 2024-12-11T04:27:12,802 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e., storeName=0d95822809793edddcee8d4c8425775e/C, priority=12, startTime=1733891232309; duration=0sec 2024-12-11T04:27:12,803 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:12,803 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0d95822809793edddcee8d4c8425775e:C 2024-12-11T04:27:12,928 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:12,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35444 deadline: 1733891292928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:12,995 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:13,000 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412115e9d0d32eb8d4e53b85ac7a64a92474d_0d95822809793edddcee8d4c8425775e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412115e9d0d32eb8d4e53b85ac7a64a92474d_0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:13,001 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/13fdf9c5521341ea9a03d8d7611ed099, store: [table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:27:13,002 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/13fdf9c5521341ea9a03d8d7611ed099 is 175, key is test_row_0/A:col10/1733891232565/Put/seqid=0 2024-12-11T04:27:13,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742073_1249 (size=31255) 2024-12-11T04:27:13,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-11T04:27:13,123 INFO [Thread-752 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 61 completed 2024-12-11T04:27:13,124 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:27:13,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees 2024-12-11T04:27:13,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-11T04:27:13,126 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=63, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:27:13,126 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=63, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:27:13,126 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:27:13,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-11T04:27:13,232 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:13,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35444 deadline: 1733891293232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:13,278 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:13,279 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-12-11T04:27:13,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 
2024-12-11T04:27:13,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. as already flushing 2024-12-11T04:27:13,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:13,279 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:13,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:27:13,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:27:13,412 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=343, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/13fdf9c5521341ea9a03d8d7611ed099 2024-12-11T04:27:13,422 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/c137075c24ae4ec59ba091ad54c4d444 is 50, key is test_row_0/B:col10/1733891232565/Put/seqid=0 2024-12-11T04:27:13,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742074_1250 (size=12301) 2024-12-11T04:27:13,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-11T04:27:13,431 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:13,432 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-12-11T04:27:13,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:13,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. as already flushing 2024-12-11T04:27:13,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:13,432 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:27:13,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:13,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:13,584 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:13,585 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-12-11T04:27:13,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:13,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. as already flushing 2024-12-11T04:27:13,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:13,585 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:13,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:13,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:13,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-11T04:27:13,736 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:13,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35444 deadline: 1733891293735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:13,737 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:13,737 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-12-11T04:27:13,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:13,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. as already flushing 2024-12-11T04:27:13,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:13,738 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:13,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:13,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:13,828 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=343 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/c137075c24ae4ec59ba091ad54c4d444 2024-12-11T04:27:13,840 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/9b1cd38bb7274b39876cdee01be0a5c6 is 50, key is test_row_0/C:col10/1733891232565/Put/seqid=0 2024-12-11T04:27:13,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742075_1251 (size=12301) 2024-12-11T04:27:13,847 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=343 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/9b1cd38bb7274b39876cdee01be0a5c6 2024-12-11T04:27:13,852 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/13fdf9c5521341ea9a03d8d7611ed099 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/13fdf9c5521341ea9a03d8d7611ed099 2024-12-11T04:27:13,857 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/13fdf9c5521341ea9a03d8d7611ed099, entries=150, sequenceid=343, filesize=30.5 K 2024-12-11T04:27:13,859 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/c137075c24ae4ec59ba091ad54c4d444 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/c137075c24ae4ec59ba091ad54c4d444 2024-12-11T04:27:13,863 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/c137075c24ae4ec59ba091ad54c4d444, entries=150, sequenceid=343, filesize=12.0 K 
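The RegionTooBusyException entries above ("Over memstore limit=512.0 K") come from HRegion.checkResources, which rejects writes once a region's memstore passes its blocking threshold, i.e. hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The sketch below only illustrates that relation; the 128 K flush size and multiplier of 4 are assumptions chosen to reproduce the 512 K limit seen in the log, not values read from the test configuration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed values: the log only shows the resulting 512 K blocking limit.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024); // per-region flush trigger (assumed 128 K)
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // default multiplier

    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    // Prints 524288 (512 K): above this per-region memstore size, puts are rejected
    // with RegionTooBusyException until a flush brings the memstore back down.
    System.out.println("Writes block above " + blockingLimit + " bytes");
  }
}
```

Raising the multiplier only postpones the rejection; the intended remedy, a flush, is already running for this region in the surrounding entries.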
2024-12-11T04:27:13,867 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/9b1cd38bb7274b39876cdee01be0a5c6 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/9b1cd38bb7274b39876cdee01be0a5c6 2024-12-11T04:27:13,871 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/9b1cd38bb7274b39876cdee01be0a5c6, entries=150, sequenceid=343, filesize=12.0 K 2024-12-11T04:27:13,872 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 0d95822809793edddcee8d4c8425775e in 1306ms, sequenceid=343, compaction requested=false 2024-12-11T04:27:13,872 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0d95822809793edddcee8d4c8425775e: 2024-12-11T04:27:13,890 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:13,890 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-12-11T04:27:13,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 
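In the entries above, pid=63 is a FlushTableProcedure for TestAcidGuarantees and pid=64 is its per-region FlushRegionProcedure; the master keeps redispatching the callable while the region reports it is "already flushing". A client can request the same table flush through the Admin API. The snippet below is a minimal sketch assuming a reachable cluster and the table name taken from the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; the master drives this
      // through a flush procedure and per-region subprocedures, as in the pid=63/64 entries.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```

Each region flush then produces the per-family .tmp HFiles and the "Committing ... as ..." renames visible in the neighbouring MemStoreFlusher entries.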
2024-12-11T04:27:13,890 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2837): Flushing 0d95822809793edddcee8d4c8425775e 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-11T04:27:13,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=A 2024-12-11T04:27:13,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:13,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=B 2024-12-11T04:27:13,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:13,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=C 2024-12-11T04:27:13,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:13,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211c91effe2344244bda9f59b305b4ba479_0d95822809793edddcee8d4c8425775e is 50, key is test_row_0/A:col10/1733891232619/Put/seqid=0 2024-12-11T04:27:13,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742076_1252 (size=12454) 2024-12-11T04:27:13,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:13,921 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211c91effe2344244bda9f59b305b4ba479_0d95822809793edddcee8d4c8425775e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211c91effe2344244bda9f59b305b4ba479_0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:13,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/c6b7b8a3fcd04e1bb5282f5edde7f58f, store: [table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:27:13,925 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/c6b7b8a3fcd04e1bb5282f5edde7f58f is 175, key is test_row_0/A:col10/1733891232619/Put/seqid=0 2024-12-11T04:27:13,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742077_1253 (size=31255) 2024-12-11T04:27:13,935 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=369, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/c6b7b8a3fcd04e1bb5282f5edde7f58f 2024-12-11T04:27:13,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/c9be41090e8a4b35a14da4141e2f0a3b is 50, key is test_row_0/B:col10/1733891232619/Put/seqid=0 2024-12-11T04:27:13,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742078_1254 (size=12301) 2024-12-11T04:27:14,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-11T04:27:14,347 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=369 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/c9be41090e8a4b35a14da4141e2f0a3b 2024-12-11T04:27:14,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:14,352 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. as already flushing 2024-12-11T04:27:14,366 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:14,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891294364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:14,367 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:14,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891294366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:14,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/0b6a77d542964accad9963f916187ad1 is 50, key is test_row_0/C:col10/1733891232619/Put/seqid=0 2024-12-11T04:27:14,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742079_1255 (size=12301) 2024-12-11T04:27:14,374 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=369 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/0b6a77d542964accad9963f916187ad1 2024-12-11T04:27:14,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/c6b7b8a3fcd04e1bb5282f5edde7f58f as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/c6b7b8a3fcd04e1bb5282f5edde7f58f 2024-12-11T04:27:14,385 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/c6b7b8a3fcd04e1bb5282f5edde7f58f, entries=150, sequenceid=369, filesize=30.5 K 2024-12-11T04:27:14,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/c9be41090e8a4b35a14da4141e2f0a3b as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/c9be41090e8a4b35a14da4141e2f0a3b 2024-12-11T04:27:14,386 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 
{}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:14,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35428 deadline: 1733891294385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:14,389 DEBUG [Thread-746 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8184 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e., hostname=5f466b3719ec,39071,1733891180267, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at 
org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T04:27:14,391 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/c9be41090e8a4b35a14da4141e2f0a3b, entries=150, sequenceid=369, filesize=12.0 K 2024-12-11T04:27:14,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/0b6a77d542964accad9963f916187ad1 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/0b6a77d542964accad9963f916187ad1 2024-12-11T04:27:14,397 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/0b6a77d542964accad9963f916187ad1, entries=150, sequenceid=369, filesize=12.0 K 2024-12-11T04:27:14,398 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 0d95822809793edddcee8d4c8425775e in 507ms, sequenceid=369, compaction requested=true 2024-12-11T04:27:14,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2538): Flush status journal for 0d95822809793edddcee8d4c8425775e: 2024-12-11T04:27:14,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 
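The Thread-746 entry above shows the test's writer (AcidGuaranteesTestTool$AtomicityWriter) inside HTable.put, where RpcRetryingCallerImpl has already retried 7 of 16 times against the busy region. The sketch below mirrors that write path using the row, families, and qualifier named in the log; the retry settings and payload are assumptions, and the exception handling is illustrative rather than the test tool's actual logic.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class AtomicWriteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.client.retries.number", 16); // matches retries=16 in the log (assumed to be configured)
    conf.setLong("hbase.client.pause", 100);        // base backoff in ms (assumed)

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // One row written to all three families so readers should see the columns change together.
      Put put = new Put(Bytes.toBytes("test_row_2"));
      byte[] value = new byte[8]; // placeholder payload
      for (String family : new String[] {"A", "B", "C"}) {
        put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"), value);
      }
      try {
        table.put(put); // retried internally before any exception reaches the caller
      } catch (IOException e) {
        // Only after the retry budget is exhausted does the failure surface; in the log
        // the underlying cause was RegionTooBusyException (memstore over its blocking limit).
        System.err.println("put failed after retries: " + e);
      }
    }
  }
}
```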
2024-12-11T04:27:14,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=64 2024-12-11T04:27:14,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=64 2024-12-11T04:27:14,400 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=64, resume processing ppid=63 2024-12-11T04:27:14,400 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2730 sec 2024-12-11T04:27:14,402 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees in 1.2770 sec 2024-12-11T04:27:14,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:14,474 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0d95822809793edddcee8d4c8425775e 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-11T04:27:14,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=A 2024-12-11T04:27:14,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:14,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=B 2024-12-11T04:27:14,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:14,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=C 2024-12-11T04:27:14,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:14,487 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211eddf46bd0b32452da874f3e2c51106a2_0d95822809793edddcee8d4c8425775e is 50, key is test_row_0/A:col10/1733891234474/Put/seqid=0 2024-12-11T04:27:14,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742080_1256 (size=17534) 2024-12-11T04:27:14,514 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:14,517 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:14,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891294517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:14,518 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:14,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891294517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:14,518 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211eddf46bd0b32452da874f3e2c51106a2_0d95822809793edddcee8d4c8425775e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211eddf46bd0b32452da874f3e2c51106a2_0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:14,520 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/1fbab0c0dca3445c9ad6fdca804251a0, store: [table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:27:14,520 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/1fbab0c0dca3445c9ad6fdca804251a0 is 175, key is test_row_0/A:col10/1733891234474/Put/seqid=0 2024-12-11T04:27:14,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742081_1257 (size=48639) 2024-12-11T04:27:14,537 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=383, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/1fbab0c0dca3445c9ad6fdca804251a0 2024-12-11T04:27:14,545 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/91bac4abd2184787864973db22104696 is 50, key is test_row_0/B:col10/1733891234474/Put/seqid=0 2024-12-11T04:27:14,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742082_1258 (size=12301) 2024-12-11T04:27:14,561 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=383 (bloomFilter=true), 
to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/91bac4abd2184787864973db22104696 2024-12-11T04:27:14,575 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/256d162195724fae9847cb51451df5ae is 50, key is test_row_0/C:col10/1733891234474/Put/seqid=0 2024-12-11T04:27:14,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742083_1259 (size=12301) 2024-12-11T04:27:14,620 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:14,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891294619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:14,621 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:14,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891294619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:14,747 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:14,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35444 deadline: 1733891294746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:14,823 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:14,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891294822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:14,825 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:14,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891294823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:15,003 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=383 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/256d162195724fae9847cb51451df5ae 2024-12-11T04:27:15,008 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/1fbab0c0dca3445c9ad6fdca804251a0 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/1fbab0c0dca3445c9ad6fdca804251a0 2024-12-11T04:27:15,013 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/1fbab0c0dca3445c9ad6fdca804251a0, entries=250, sequenceid=383, filesize=47.5 K 2024-12-11T04:27:15,014 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/91bac4abd2184787864973db22104696 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/91bac4abd2184787864973db22104696 2024-12-11T04:27:15,018 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/91bac4abd2184787864973db22104696, entries=150, sequenceid=383, filesize=12.0 K 2024-12-11T04:27:15,019 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/256d162195724fae9847cb51451df5ae as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/256d162195724fae9847cb51451df5ae 2024-12-11T04:27:15,024 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/256d162195724fae9847cb51451df5ae, entries=150, sequenceid=383, filesize=12.0 K 2024-12-11T04:27:15,024 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 0d95822809793edddcee8d4c8425775e in 551ms, sequenceid=383, compaction requested=true 2024-12-11T04:27:15,024 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0d95822809793edddcee8d4c8425775e: 2024-12-11T04:27:15,025 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T04:27:15,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0d95822809793edddcee8d4c8425775e:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:27:15,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:15,025 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T04:27:15,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0d95822809793edddcee8d4c8425775e:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:27:15,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:15,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0d95822809793edddcee8d4c8425775e:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:27:15,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:27:15,027 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 143154 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T04:27:15,027 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): 0d95822809793edddcee8d4c8425775e/A is initiating minor compaction (all files) 2024-12-11T04:27:15,027 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0d95822809793edddcee8d4c8425775e/A in TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 
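The RegionTooBusyException warnings above come from HRegion.checkResources(): once a region's memstore passes its blocking size (the configured flush size times the block multiplier, 512 K here), new mutations are rejected until the in-flight flush visible in the same entries completes. A minimal client-side sketch of that relationship, assuming the stock property names and an illustrative row/column layout; the 128 KB flush size and the retry loop are illustrative and not taken from this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPutExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Blocking threshold = memstore flush size * block multiplier.
        // 128 KB * 4 = 512 KB would match the "Over memstore limit=512.0 K" in the log
        // (illustrative values; the test's actual settings are not shown in this section).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          for (int attempt = 0; attempt < 5; attempt++) {
            try {
              table.put(put);
              break;
            } catch (RegionTooBusyException e) {
              // Back off and let the region's flush drain the memstore before retrying.
              Thread.sleep(100L << attempt);
            }
          }
        }
      }
    }

In practice the HBase client retries RegionTooBusyException internally, so an explicit loop like this only matters once those built-in retries are exhausted.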
2024-12-11T04:27:15,027 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/158bb97e6807422eb0fb47232dfada33, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/13fdf9c5521341ea9a03d8d7611ed099, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/c6b7b8a3fcd04e1bb5282f5edde7f58f, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/1fbab0c0dca3445c9ad6fdca804251a0] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp, totalSize=139.8 K 2024-12-11T04:27:15,027 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:15,027 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. files: [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/158bb97e6807422eb0fb47232dfada33, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/13fdf9c5521341ea9a03d8d7611ed099, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/c6b7b8a3fcd04e1bb5282f5edde7f58f, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/1fbab0c0dca3445c9ad6fdca804251a0] 2024-12-11T04:27:15,028 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 158bb97e6807422eb0fb47232dfada33, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1733891230303 2024-12-11T04:27:15,028 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49954 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T04:27:15,028 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 13fdf9c5521341ea9a03d8d7611ed099, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=343, earliestPutTs=1733891231439 2024-12-11T04:27:15,028 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): 0d95822809793edddcee8d4c8425775e/B is initiating minor compaction (all files) 2024-12-11T04:27:15,028 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0d95822809793edddcee8d4c8425775e/B in 
TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:15,028 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/ba6805ab609442f5b1977d80a689809e, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/c137075c24ae4ec59ba091ad54c4d444, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/c9be41090e8a4b35a14da4141e2f0a3b, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/91bac4abd2184787864973db22104696] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp, totalSize=48.8 K 2024-12-11T04:27:15,029 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting c6b7b8a3fcd04e1bb5282f5edde7f58f, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=369, earliestPutTs=1733891232615 2024-12-11T04:27:15,029 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting ba6805ab609442f5b1977d80a689809e, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1733891230303 2024-12-11T04:27:15,029 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1fbab0c0dca3445c9ad6fdca804251a0, keycount=250, bloomtype=ROW, size=47.5 K, encoding=NONE, compression=NONE, seqNum=383, earliestPutTs=1733891234358 2024-12-11T04:27:15,029 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting c137075c24ae4ec59ba091ad54c4d444, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=343, earliestPutTs=1733891231439 2024-12-11T04:27:15,030 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting c9be41090e8a4b35a14da4141e2f0a3b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=369, earliestPutTs=1733891232615 2024-12-11T04:27:15,030 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 91bac4abd2184787864973db22104696, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=383, earliestPutTs=1733891234359 2024-12-11T04:27:15,044 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:27:15,054 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0d95822809793edddcee8d4c8425775e#B#compaction#217 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:27:15,055 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/ffc507ea11414c8fbe7f91029d7a45a0 is 50, key is test_row_0/B:col10/1733891234474/Put/seqid=0 2024-12-11T04:27:15,056 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241211452b75ebf03e41ad8654789bfa6bec3d_0d95822809793edddcee8d4c8425775e store=[table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:27:15,059 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241211452b75ebf03e41ad8654789bfa6bec3d_0d95822809793edddcee8d4c8425775e, store=[table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:27:15,059 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211452b75ebf03e41ad8654789bfa6bec3d_0d95822809793edddcee8d4c8425775e because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:27:15,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742084_1260 (size=4469) 2024-12-11T04:27:15,076 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0d95822809793edddcee8d4c8425775e#A#compaction#216 average throughput is 0.76 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:27:15,078 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/009eba850ea84ae187a9e7cbea23557c is 175, key is test_row_0/A:col10/1733891234474/Put/seqid=0 2024-12-11T04:27:15,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742086_1262 (size=32141) 2024-12-11T04:27:15,096 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/009eba850ea84ae187a9e7cbea23557c as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/009eba850ea84ae187a9e7cbea23557c 2024-12-11T04:27:15,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742085_1261 (size=13187) 2024-12-11T04:27:15,101 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0d95822809793edddcee8d4c8425775e/A of 0d95822809793edddcee8d4c8425775e into 009eba850ea84ae187a9e7cbea23557c(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:27:15,101 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0d95822809793edddcee8d4c8425775e: 2024-12-11T04:27:15,101 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e., storeName=0d95822809793edddcee8d4c8425775e/A, priority=12, startTime=1733891235025; duration=0sec 2024-12-11T04:27:15,101 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:27:15,101 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0d95822809793edddcee8d4c8425775e:A 2024-12-11T04:27:15,101 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T04:27:15,105 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49954 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T04:27:15,105 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): 0d95822809793edddcee8d4c8425775e/C is initiating minor compaction (all files) 2024-12-11T04:27:15,105 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0d95822809793edddcee8d4c8425775e/C in TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 
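Store A in these compaction entries is handled by DefaultMobStoreCompactor, and the temporary MOB writer is aborted because no cell crossed the MOB threshold, which indicates the family is MOB-enabled. A minimal sketch, assuming an illustrative 100 KB threshold rather than whatever this test actually configures, of how such a family is declared with the HBase 2.x descriptor builders:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMobTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Cells in family A larger than the threshold are written as MOB files under
          // the mobdir seen in the log instead of inline in the store's HFiles
          // (threshold value is illustrative).
          TableDescriptorBuilder table =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                  .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                      .setMobEnabled(true)
                      .setMobThreshold(100 * 1024L)
                      .build())
                  .setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"))
                  .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"));
          admin.createTable(table.build());
        }
      }
    }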
2024-12-11T04:27:15,106 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/ffc507ea11414c8fbe7f91029d7a45a0 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/ffc507ea11414c8fbe7f91029d7a45a0 2024-12-11T04:27:15,106 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/83024c6eb95141f5ae927034bb6b297c, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/9b1cd38bb7274b39876cdee01be0a5c6, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/0b6a77d542964accad9963f916187ad1, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/256d162195724fae9847cb51451df5ae] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp, totalSize=48.8 K 2024-12-11T04:27:15,107 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 83024c6eb95141f5ae927034bb6b297c, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1733891230303 2024-12-11T04:27:15,108 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9b1cd38bb7274b39876cdee01be0a5c6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=343, earliestPutTs=1733891231439 2024-12-11T04:27:15,108 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0b6a77d542964accad9963f916187ad1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=369, earliestPutTs=1733891232615 2024-12-11T04:27:15,108 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 256d162195724fae9847cb51451df5ae, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=383, earliestPutTs=1733891234359 2024-12-11T04:27:15,112 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0d95822809793edddcee8d4c8425775e/B of 0d95822809793edddcee8d4c8425775e into ffc507ea11414c8fbe7f91029d7a45a0(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
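The selection entries ("Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking", with ExploringCompactionPolicy then taking all four files) are governed by store-level compaction settings. A minimal sketch of the commonly tuned properties, assuming the stock names; the values are default-like and, apart from the blocking-file count of 16, are not read from this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningExample {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minimum / maximum number of HFiles considered for one minor compaction.
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Ratio used by ExploringCompactionPolicy when deciding whether a file is
        // close enough in size to its neighbours to be included in the selection.
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        // Writes to a store are delayed once it accumulates this many HFiles
        // ("16 blocking" in the selection entries corresponds to this default).
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        // Bounds used by the pressure-aware throughput controller; its current limit
        // shows up in the log as "total limit is 50.00 MB/second".
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        System.out.println("compaction ratio = "
            + conf.getFloat("hbase.hstore.compaction.ratio", 1.2f));
      }
    }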
2024-12-11T04:27:15,112 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0d95822809793edddcee8d4c8425775e: 2024-12-11T04:27:15,112 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e., storeName=0d95822809793edddcee8d4c8425775e/B, priority=12, startTime=1733891235025; duration=0sec 2024-12-11T04:27:15,112 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:15,112 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0d95822809793edddcee8d4c8425775e:B 2024-12-11T04:27:15,119 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0d95822809793edddcee8d4c8425775e#C#compaction#218 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:27:15,120 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/bb751d8537e8405993ae1738b465c16b is 50, key is test_row_0/C:col10/1733891234474/Put/seqid=0 2024-12-11T04:27:15,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:15,126 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0d95822809793edddcee8d4c8425775e 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-11T04:27:15,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=A 2024-12-11T04:27:15,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:15,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=B 2024-12-11T04:27:15,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:15,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=C 2024-12-11T04:27:15,127 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:15,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742087_1263 (size=13187) 2024-12-11T04:27:15,141 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412110f228428b5164fb6a959fbc51a40da10_0d95822809793edddcee8d4c8425775e is 50, key is test_row_0/A:col10/1733891235125/Put/seqid=0 2024-12-11T04:27:15,148 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:15,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891295146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:15,149 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/bb751d8537e8405993ae1738b465c16b as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/bb751d8537e8405993ae1738b465c16b 2024-12-11T04:27:15,152 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:15,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891295149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:15,155 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0d95822809793edddcee8d4c8425775e/C of 0d95822809793edddcee8d4c8425775e into bb751d8537e8405993ae1738b465c16b(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:27:15,155 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0d95822809793edddcee8d4c8425775e: 2024-12-11T04:27:15,155 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e., storeName=0d95822809793edddcee8d4c8425775e/C, priority=12, startTime=1733891235025; duration=0sec 2024-12-11T04:27:15,156 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:15,156 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0d95822809793edddcee8d4c8425775e:C 2024-12-11T04:27:15,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742088_1264 (size=14994) 2024-12-11T04:27:15,162 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:15,167 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412110f228428b5164fb6a959fbc51a40da10_0d95822809793edddcee8d4c8425775e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412110f228428b5164fb6a959fbc51a40da10_0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:15,168 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/f62d8002464d4a9589999b1fed0686b1, store: [table=TestAcidGuarantees family=A 
region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:27:15,168 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/f62d8002464d4a9589999b1fed0686b1 is 175, key is test_row_0/A:col10/1733891235125/Put/seqid=0 2024-12-11T04:27:15,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742089_1265 (size=39949) 2024-12-11T04:27:15,184 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=408, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/f62d8002464d4a9589999b1fed0686b1 2024-12-11T04:27:15,194 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/8bfc95fa3b4949f7b84a75d7d322c438 is 50, key is test_row_0/B:col10/1733891235125/Put/seqid=0 2024-12-11T04:27:15,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742090_1266 (size=12301) 2024-12-11T04:27:15,210 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=408 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/8bfc95fa3b4949f7b84a75d7d322c438 2024-12-11T04:27:15,226 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/037d28ff1407416faf4a047464d6119a is 50, key is test_row_0/C:col10/1733891235125/Put/seqid=0 2024-12-11T04:27:15,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-11T04:27:15,230 INFO [Thread-752 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 63 completed 2024-12-11T04:27:15,232 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:27:15,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=65, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees 2024-12-11T04:27:15,234 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=65, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:27:15,235 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=65, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:27:15,235 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-11T04:27:15,235 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:27:15,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742091_1267 (size=12301) 2024-12-11T04:27:15,241 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=408 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/037d28ff1407416faf4a047464d6119a 2024-12-11T04:27:15,244 DEBUG [Thread-753 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x517ff977 to 127.0.0.1:50078 2024-12-11T04:27:15,245 DEBUG [Thread-753 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:27:15,245 DEBUG [Thread-755 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3448d233 to 127.0.0.1:50078 2024-12-11T04:27:15,245 DEBUG [Thread-755 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:27:15,247 DEBUG [Thread-757 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7a11164b to 127.0.0.1:50078 2024-12-11T04:27:15,247 DEBUG [Thread-757 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:27:15,249 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/f62d8002464d4a9589999b1fed0686b1 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/f62d8002464d4a9589999b1fed0686b1 2024-12-11T04:27:15,250 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:15,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35470 deadline: 1733891295250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:15,251 DEBUG [Thread-759 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x08a7e1dd to 127.0.0.1:50078 2024-12-11T04:27:15,251 DEBUG [Thread-759 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:27:15,253 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:15,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:35456 deadline: 1733891295253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:15,253 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/f62d8002464d4a9589999b1fed0686b1, entries=200, sequenceid=408, filesize=39.0 K 2024-12-11T04:27:15,254 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/8bfc95fa3b4949f7b84a75d7d322c438 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/8bfc95fa3b4949f7b84a75d7d322c438 2024-12-11T04:27:15,258 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/8bfc95fa3b4949f7b84a75d7d322c438, entries=150, sequenceid=408, filesize=12.0 K 2024-12-11T04:27:15,259 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/037d28ff1407416faf4a047464d6119a as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/037d28ff1407416faf4a047464d6119a 2024-12-11T04:27:15,263 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/037d28ff1407416faf4a047464d6119a, entries=150, sequenceid=408, filesize=12.0 K 2024-12-11T04:27:15,263 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 0d95822809793edddcee8d4c8425775e in 137ms, sequenceid=408, compaction requested=false 2024-12-11T04:27:15,264 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0d95822809793edddcee8d4c8425775e: 2024-12-11T04:27:15,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-11T04:27:15,387 DEBUG [RSProcedureDispatcher-pool-0 {}] 
master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:15,387 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-11T04:27:15,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:15,388 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2837): Flushing 0d95822809793edddcee8d4c8425775e 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-11T04:27:15,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=A 2024-12-11T04:27:15,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:15,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=B 2024-12-11T04:27:15,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:15,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=C 2024-12-11T04:27:15,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:15,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412115e34b8091dab481fb00c4d7603d02ee7_0d95822809793edddcee8d4c8425775e is 50, key is test_row_0/A:col10/1733891235140/Put/seqid=0 2024-12-11T04:27:15,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742092_1268 (size=12454) 2024-12-11T04:27:15,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:15,453 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 
as already flushing 2024-12-11T04:27:15,453 DEBUG [Thread-750 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x68f0be85 to 127.0.0.1:50078 2024-12-11T04:27:15,453 DEBUG [Thread-750 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:27:15,456 DEBUG [Thread-744 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x176c5c1b to 127.0.0.1:50078 2024-12-11T04:27:15,456 DEBUG [Thread-744 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:27:15,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-11T04:27:15,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:15,802 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412115e34b8091dab481fb00c4d7603d02ee7_0d95822809793edddcee8d4c8425775e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412115e34b8091dab481fb00c4d7603d02ee7_0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:15,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/b13e53d58dc64dae92aa9c4a4cf536cc, store: [table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:27:15,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/b13e53d58dc64dae92aa9c4a4cf536cc is 175, key is test_row_0/A:col10/1733891235140/Put/seqid=0 2024-12-11T04:27:15,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742093_1269 (size=31255) 2024-12-11T04:27:15,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-11T04:27:16,208 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=422, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/b13e53d58dc64dae92aa9c4a4cf536cc 2024-12-11T04:27:16,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/da696e99a8574da3bed9bb1aea3af572 is 50, key is test_row_0/B:col10/1733891235140/Put/seqid=0 2024-12-11T04:27:16,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742094_1270 (size=12301) 2024-12-11T04:27:16,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-11T04:27:16,620 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=422 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/da696e99a8574da3bed9bb1aea3af572 2024-12-11T04:27:16,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/d66f4a155e034ce2b9aca6a67a1b50f6 is 50, key is test_row_0/C:col10/1733891235140/Put/seqid=0 2024-12-11T04:27:16,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742095_1271 (size=12301) 2024-12-11T04:27:16,750 DEBUG [Thread-748 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7c5c4716 to 127.0.0.1:50078 2024-12-11T04:27:16,750 DEBUG [Thread-748 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:27:17,030 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=422 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/d66f4a155e034ce2b9aca6a67a1b50f6 2024-12-11T04:27:17,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/b13e53d58dc64dae92aa9c4a4cf536cc as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/b13e53d58dc64dae92aa9c4a4cf536cc 2024-12-11T04:27:17,038 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/b13e53d58dc64dae92aa9c4a4cf536cc, entries=150, sequenceid=422, filesize=30.5 K 2024-12-11T04:27:17,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/da696e99a8574da3bed9bb1aea3af572 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/da696e99a8574da3bed9bb1aea3af572 2024-12-11T04:27:17,042 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/da696e99a8574da3bed9bb1aea3af572, entries=150, sequenceid=422, filesize=12.0 K 2024-12-11T04:27:17,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/d66f4a155e034ce2b9aca6a67a1b50f6 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/d66f4a155e034ce2b9aca6a67a1b50f6 2024-12-11T04:27:17,046 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/d66f4a155e034ce2b9aca6a67a1b50f6, entries=150, sequenceid=422, filesize=12.0 K 2024-12-11T04:27:17,047 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=20.13 KB/20610 for 0d95822809793edddcee8d4c8425775e in 1659ms, sequenceid=422, compaction requested=true 2024-12-11T04:27:17,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2538): Flush status journal for 0d95822809793edddcee8d4c8425775e: 2024-12-11T04:27:17,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 
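The pid=65/pid=66 entries around here trace a FlushTableProcedure on the master fanning out a FlushRegionProcedure to the region server, while the client polls "Checking to see if procedure is done" until the operation is reported complete just below. A minimal sketch of the client call that starts such a procedure, using the standard Admin API; connection details are assumed, not taken from this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Synchronous from the caller's point of view: the admin client submits the
          // flush and waits for the master procedure (as polled in the log) to finish.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }

Admin.flushRegion(byte[]) scopes the same request to a single encoded region name instead of the whole table.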
2024-12-11T04:27:17,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=66 2024-12-11T04:27:17,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=66 2024-12-11T04:27:17,049 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=65 2024-12-11T04:27:17,049 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=65, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8130 sec 2024-12-11T04:27:17,050 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees in 1.8170 sec 2024-12-11T04:27:17,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-11T04:27:17,338 INFO [Thread-752 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 65 completed 2024-12-11T04:27:18,617 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-11T04:27:19,817 DEBUG [Thread-742 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x433e2b26 to 127.0.0.1:50078 2024-12-11T04:27:19,817 DEBUG [Thread-742 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:27:24,333 DEBUG [master/5f466b3719ec:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 59414c8d1d0a5ff3a2f59cbb4ca8825b changed from -1.0 to 0.0, refreshing cache 2024-12-11T04:27:24,405 DEBUG [Thread-746 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x24f64590 to 127.0.0.1:50078 2024-12-11T04:27:24,405 DEBUG [Thread-746 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:27:24,405 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-11T04:27:24,405 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 32 2024-12-11T04:27:24,405 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 76 2024-12-11T04:27:24,405 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 53 2024-12-11T04:27:24,405 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 105 2024-12-11T04:27:24,405 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 70 2024-12-11T04:27:24,405 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-11T04:27:24,405 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6304 2024-12-11T04:27:24,405 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6317 2024-12-11T04:27:24,405 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-11T04:27:24,405 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2689 2024-12-11T04:27:24,405 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8067 rows 2024-12-11T04:27:24,405 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2660 2024-12-11T04:27:24,405 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7979 rows 2024-12-11T04:27:24,405 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-11T04:27:24,405 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7cca453a to 127.0.0.1:50078 2024-12-11T04:27:24,405 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:27:24,412 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-11T04:27:24,412 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-11T04:27:24,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=67, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-11T04:27:24,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-11T04:27:24,415 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733891244415"}]},"ts":"1733891244415"} 2024-12-11T04:27:24,416 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-11T04:27:24,418 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-11T04:27:24,419 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=68, ppid=67, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-11T04:27:24,420 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=0d95822809793edddcee8d4c8425775e, UNASSIGN}] 2024-12-11T04:27:24,421 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure 
table=TestAcidGuarantees, region=0d95822809793edddcee8d4c8425775e, UNASSIGN 2024-12-11T04:27:24,421 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=69 updating hbase:meta row=0d95822809793edddcee8d4c8425775e, regionState=CLOSING, regionLocation=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:24,422 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T04:27:24,422 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE; CloseRegionProcedure 0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267}] 2024-12-11T04:27:24,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-11T04:27:24,573 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:24,573 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] handler.UnassignRegionHandler(124): Close 0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:24,574 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-11T04:27:24,574 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegion(1681): Closing 0d95822809793edddcee8d4c8425775e, disabling compactions & flushes 2024-12-11T04:27:24,574 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:24,574 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 2024-12-11T04:27:24,574 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. after waiting 0 ms 2024-12-11T04:27:24,574 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 
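[Editor's note — a hedged sketch, not part of the test's source: the DisableTableProcedure chain above (pid=67 → CloseTableRegionsProcedure 68 → TransitRegionStateProcedure 69 → CloseRegionProcedure 70) is what the master runs when a client disables the table, which is why the region server begins closing 0d95822809793edddcee8d4c8425775e. The snippet shows the corresponding client call; only the table name comes from the log.]

```java
// Sketch of the client call behind the disable-table procedure chain above.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableExample {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      if (!admin.isTableDisabled(table)) {
        admin.disableTable(table); // blocks until the disable procedure finishes
      }
    }
  }
}
```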
2024-12-11T04:27:24,574 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegion(2837): Flushing 0d95822809793edddcee8d4c8425775e 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-11T04:27:24,574 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=A 2024-12-11T04:27:24,574 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:24,574 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=B 2024-12-11T04:27:24,574 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:24,574 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0d95822809793edddcee8d4c8425775e, store=C 2024-12-11T04:27:24,574 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:24,580 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211339014bd40a546099e15f24825ccbb71_0d95822809793edddcee8d4c8425775e is 50, key is test_row_1/A:col10/1733891239816/Put/seqid=0 2024-12-11T04:27:24,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742096_1272 (size=9914) 2024-12-11T04:27:24,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-11T04:27:24,984 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:24,989 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211339014bd40a546099e15f24825ccbb71_0d95822809793edddcee8d4c8425775e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211339014bd40a546099e15f24825ccbb71_0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:24,990 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/3e52e028a1e44e4680040c598873e0e6, store: [table=TestAcidGuarantees family=A region=0d95822809793edddcee8d4c8425775e] 2024-12-11T04:27:24,990 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/3e52e028a1e44e4680040c598873e0e6 is 175, key is test_row_1/A:col10/1733891239816/Put/seqid=0 2024-12-11T04:27:24,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742097_1273 (size=22561) 2024-12-11T04:27:25,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-11T04:27:25,394 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=430, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/3e52e028a1e44e4680040c598873e0e6 2024-12-11T04:27:25,401 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/8a24b162b7a644018dc12f95500758aa is 50, key is test_row_1/B:col10/1733891239816/Put/seqid=0 2024-12-11T04:27:25,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742098_1274 (size=9857) 2024-12-11T04:27:25,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-11T04:27:25,806 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=430 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/8a24b162b7a644018dc12f95500758aa 2024-12-11T04:27:25,812 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/2331105b41954459a3647acb4a86a9d6 is 50, key is test_row_1/C:col10/1733891239816/Put/seqid=0 2024-12-11T04:27:25,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742099_1275 (size=9857) 2024-12-11T04:27:26,217 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=430 (bloomFilter=true), 
to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/2331105b41954459a3647acb4a86a9d6 2024-12-11T04:27:26,221 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/A/3e52e028a1e44e4680040c598873e0e6 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/3e52e028a1e44e4680040c598873e0e6 2024-12-11T04:27:26,225 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/3e52e028a1e44e4680040c598873e0e6, entries=100, sequenceid=430, filesize=22.0 K 2024-12-11T04:27:26,226 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/B/8a24b162b7a644018dc12f95500758aa as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/8a24b162b7a644018dc12f95500758aa 2024-12-11T04:27:26,229 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/8a24b162b7a644018dc12f95500758aa, entries=100, sequenceid=430, filesize=9.6 K 2024-12-11T04:27:26,230 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/.tmp/C/2331105b41954459a3647acb4a86a9d6 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/2331105b41954459a3647acb4a86a9d6 2024-12-11T04:27:26,233 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/2331105b41954459a3647acb4a86a9d6, entries=100, sequenceid=430, filesize=9.6 K 2024-12-11T04:27:26,234 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 0d95822809793edddcee8d4c8425775e in 1660ms, sequenceid=430, compaction requested=true 2024-12-11T04:27:26,234 DEBUG [StoreCloser-TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/41d1e8a514224734baf0c8eddd03d60b, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/b8f776a81ef04fdd9cba2406e3b5679b, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/4f6c9becba1c4f4aaf4a6b5f6637c684, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/efd67f94c2d64d3a9721e28395bc75e7, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/feaf3392aa884218aef7077c446a1852, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/0bd82e1ebe194af9853c25db2f10dde8, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/3445aab6619542ee9e4278f4a36e94d3, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/34c92aabc14541869e1fcdb85393dc55, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/faa20e9f5c0248b5bd836076bc2294f0, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/676e671cdce5456eb78f84e85cc07a24, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/690c535e3c8f4f428d3de00a6875626e, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/875f8896158a4b529220b5ea2da4649d, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/c81931684f2e4b9caeeeb5328e37963f, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/836e6d75a39148a68ad6b249dc731a35, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/b0168a1b63744731ba362cfa27978028, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/724d2836b5634efbbb5b25ad2b1bc1a6, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/648251a83c164e13a6cd4f05b7a5ac29, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/c36198d8fdfc408ab9b03f82ce94cb8b, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/d911735e4ceb4cb5ab147c5463c02ce2, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/9a263894cbad4b48a1d1ffeb55e97a73, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/e9f8ea4004d94d9bbc62fe742eb22c37, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/bc6d3147a64642288ff6640e6d761b8d, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/158bb97e6807422eb0fb47232dfada33, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/13fdf9c5521341ea9a03d8d7611ed099, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/c6b7b8a3fcd04e1bb5282f5edde7f58f, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/1fbab0c0dca3445c9ad6fdca804251a0] to archive 2024-12-11T04:27:26,235 DEBUG [StoreCloser-TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-11T04:27:26,239 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/41d1e8a514224734baf0c8eddd03d60b to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/41d1e8a514224734baf0c8eddd03d60b 2024-12-11T04:27:26,239 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/b8f776a81ef04fdd9cba2406e3b5679b to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/b8f776a81ef04fdd9cba2406e3b5679b 2024-12-11T04:27:26,239 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/4f6c9becba1c4f4aaf4a6b5f6637c684 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/4f6c9becba1c4f4aaf4a6b5f6637c684 2024-12-11T04:27:26,239 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/efd67f94c2d64d3a9721e28395bc75e7 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/efd67f94c2d64d3a9721e28395bc75e7 2024-12-11T04:27:26,240 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/feaf3392aa884218aef7077c446a1852 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/feaf3392aa884218aef7077c446a1852 2024-12-11T04:27:26,242 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/0bd82e1ebe194af9853c25db2f10dde8 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/0bd82e1ebe194af9853c25db2f10dde8 2024-12-11T04:27:26,242 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/3445aab6619542ee9e4278f4a36e94d3 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/3445aab6619542ee9e4278f4a36e94d3 2024-12-11T04:27:26,242 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/875f8896158a4b529220b5ea2da4649d to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/875f8896158a4b529220b5ea2da4649d 2024-12-11T04:27:26,242 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/676e671cdce5456eb78f84e85cc07a24 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/676e671cdce5456eb78f84e85cc07a24 2024-12-11T04:27:26,242 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/690c535e3c8f4f428d3de00a6875626e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/690c535e3c8f4f428d3de00a6875626e 2024-12-11T04:27:26,243 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/34c92aabc14541869e1fcdb85393dc55 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/34c92aabc14541869e1fcdb85393dc55 2024-12-11T04:27:26,243 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/faa20e9f5c0248b5bd836076bc2294f0 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/faa20e9f5c0248b5bd836076bc2294f0 2024-12-11T04:27:26,243 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/c81931684f2e4b9caeeeb5328e37963f to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/c81931684f2e4b9caeeeb5328e37963f 2024-12-11T04:27:26,244 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/836e6d75a39148a68ad6b249dc731a35 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/836e6d75a39148a68ad6b249dc731a35 2024-12-11T04:27:26,244 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/648251a83c164e13a6cd4f05b7a5ac29 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/648251a83c164e13a6cd4f05b7a5ac29 2024-12-11T04:27:26,244 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/724d2836b5634efbbb5b25ad2b1bc1a6 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/724d2836b5634efbbb5b25ad2b1bc1a6 2024-12-11T04:27:26,245 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/b0168a1b63744731ba362cfa27978028 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/b0168a1b63744731ba362cfa27978028 2024-12-11T04:27:26,245 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/d911735e4ceb4cb5ab147c5463c02ce2 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/d911735e4ceb4cb5ab147c5463c02ce2 2024-12-11T04:27:26,245 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/c36198d8fdfc408ab9b03f82ce94cb8b to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/c36198d8fdfc408ab9b03f82ce94cb8b 2024-12-11T04:27:26,245 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/9a263894cbad4b48a1d1ffeb55e97a73 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/9a263894cbad4b48a1d1ffeb55e97a73 2024-12-11T04:27:26,246 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/e9f8ea4004d94d9bbc62fe742eb22c37 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/e9f8ea4004d94d9bbc62fe742eb22c37 2024-12-11T04:27:26,246 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/bc6d3147a64642288ff6640e6d761b8d to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/bc6d3147a64642288ff6640e6d761b8d 2024-12-11T04:27:26,246 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/158bb97e6807422eb0fb47232dfada33 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/158bb97e6807422eb0fb47232dfada33 2024-12-11T04:27:26,246 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/13fdf9c5521341ea9a03d8d7611ed099 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/13fdf9c5521341ea9a03d8d7611ed099 2024-12-11T04:27:26,246 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/1fbab0c0dca3445c9ad6fdca804251a0 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/1fbab0c0dca3445c9ad6fdca804251a0 2024-12-11T04:27:26,246 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/c6b7b8a3fcd04e1bb5282f5edde7f58f to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/c6b7b8a3fcd04e1bb5282f5edde7f58f 2024-12-11T04:27:26,248 DEBUG [StoreCloser-TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/5a701796440c407e9b0190a20662fd73, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/32e6d222382b4effadc89afdde7c0a3f, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/05315822d1914dd6b2847d834ff6891d, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/f589fe2180934ebfba3de339f165cb78, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/2da7b262b6e64efbbf4fa8aa51ff7347, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/ccfed7cc4c514dc5bb7b02d65504bc45, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/fcb1fe402f274755ac2cd971bab76cdf, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/a359f058294e4187b85ad1cafe775903, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/d5b3bdff986c4f6f9d72c84280dee44b, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/67130e258d704f0f8120de2945338c61, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/407d671bf8f24bf89711f70c89e40c1a, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/813a4c1ccbda4899943d73499c079d96, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/7ea9fa8655844084a613a6acb36e6e25, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/66228bec15bd4b30920ecb4a7a2b3d31, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/8c2f246c30a54aab99c2dbc6a232e457, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/a7e45153f13d4b2cac4e212fbf3ea028, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/f21e76840718427da4a38ae87e2e871e, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/5f01572e7fbc4f5facb0262e04054a7b, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/06636c96ed6e4667be70c50530d9bb1b, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/0e6a0dead41146c3bcf2900b765a7fbe, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/0ef39148691145be9da6f25ba8cf1f50, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/ba6805ab609442f5b1977d80a689809e, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/ccd1de5a62e3405292c72712c695e93b, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/c137075c24ae4ec59ba091ad54c4d444, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/c9be41090e8a4b35a14da4141e2f0a3b, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/91bac4abd2184787864973db22104696] to archive 2024-12-11T04:27:26,248 DEBUG [StoreCloser-TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
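[Editor's note — an illustrative sketch only, not HBase's HFileArchiver API: the archiver entries before and after this point all move compacted store files from `<root>/data/<ns>/<table>/<region>/<cf>/<file>` to the same relative location under `<root>/archive/data/`. The hypothetical helper below just reproduces that path mapping; the root dir, region, and file name in `main` are taken from the log.]

```java
// Illustration of the data/ -> archive/data/ path mapping seen in the archiver log entries.
import org.apache.hadoop.fs.Path;

public class ArchivePathExample {
  /** rootDir is the HBase root dir; storeFile must live under rootDir/data. (Assumed helper.) */
  static Path toArchivePath(Path rootDir, Path storeFile) {
    String root = rootDir.toString();
    String file = storeFile.toString();
    if (!file.startsWith(root + "/data/")) {
      throw new IllegalArgumentException("not a store file under " + root + "/data/");
    }
    // Keep the namespace/table/region/family/file layout, just rooted at archive/data.
    String relative = file.substring(root.length() + "/data/".length());
    return new Path(new Path(rootDir, "archive/data"), relative);
  }

  public static void main(String[] args) {
    Path root = new Path(
        "hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5");
    Path store = new Path(root,
        "data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/41d1e8a514224734baf0c8eddd03d60b");
    // Prints .../archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/41d1e8a514224734baf0c8eddd03d60b
    System.out.println(toArchivePath(root, store));
  }
}
```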
2024-12-11T04:27:26,250 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/05315822d1914dd6b2847d834ff6891d to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/05315822d1914dd6b2847d834ff6891d 2024-12-11T04:27:26,250 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/5a701796440c407e9b0190a20662fd73 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/5a701796440c407e9b0190a20662fd73 2024-12-11T04:27:26,250 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/f589fe2180934ebfba3de339f165cb78 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/f589fe2180934ebfba3de339f165cb78 2024-12-11T04:27:26,251 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/ccfed7cc4c514dc5bb7b02d65504bc45 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/ccfed7cc4c514dc5bb7b02d65504bc45 2024-12-11T04:27:26,251 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/2da7b262b6e64efbbf4fa8aa51ff7347 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/2da7b262b6e64efbbf4fa8aa51ff7347 2024-12-11T04:27:26,251 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/32e6d222382b4effadc89afdde7c0a3f to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/32e6d222382b4effadc89afdde7c0a3f 2024-12-11T04:27:26,251 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/fcb1fe402f274755ac2cd971bab76cdf to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/fcb1fe402f274755ac2cd971bab76cdf 2024-12-11T04:27:26,252 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/d5b3bdff986c4f6f9d72c84280dee44b to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/d5b3bdff986c4f6f9d72c84280dee44b 2024-12-11T04:27:26,252 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/a359f058294e4187b85ad1cafe775903 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/a359f058294e4187b85ad1cafe775903 2024-12-11T04:27:26,252 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/67130e258d704f0f8120de2945338c61 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/67130e258d704f0f8120de2945338c61 2024-12-11T04:27:26,252 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/407d671bf8f24bf89711f70c89e40c1a to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/407d671bf8f24bf89711f70c89e40c1a 2024-12-11T04:27:26,253 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/813a4c1ccbda4899943d73499c079d96 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/813a4c1ccbda4899943d73499c079d96 2024-12-11T04:27:26,254 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/7ea9fa8655844084a613a6acb36e6e25 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/7ea9fa8655844084a613a6acb36e6e25 2024-12-11T04:27:26,254 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/8c2f246c30a54aab99c2dbc6a232e457 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/8c2f246c30a54aab99c2dbc6a232e457 2024-12-11T04:27:26,254 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/66228bec15bd4b30920ecb4a7a2b3d31 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/66228bec15bd4b30920ecb4a7a2b3d31 2024-12-11T04:27:26,254 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/a7e45153f13d4b2cac4e212fbf3ea028 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/a7e45153f13d4b2cac4e212fbf3ea028 2024-12-11T04:27:26,255 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/f21e76840718427da4a38ae87e2e871e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/f21e76840718427da4a38ae87e2e871e 2024-12-11T04:27:26,255 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/5f01572e7fbc4f5facb0262e04054a7b to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/5f01572e7fbc4f5facb0262e04054a7b 2024-12-11T04:27:26,255 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/06636c96ed6e4667be70c50530d9bb1b to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/06636c96ed6e4667be70c50530d9bb1b 2024-12-11T04:27:26,256 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/0e6a0dead41146c3bcf2900b765a7fbe to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/0e6a0dead41146c3bcf2900b765a7fbe 2024-12-11T04:27:26,256 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/0ef39148691145be9da6f25ba8cf1f50 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/0ef39148691145be9da6f25ba8cf1f50 2024-12-11T04:27:26,256 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/ba6805ab609442f5b1977d80a689809e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/ba6805ab609442f5b1977d80a689809e 2024-12-11T04:27:26,257 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/ccd1de5a62e3405292c72712c695e93b to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/ccd1de5a62e3405292c72712c695e93b 2024-12-11T04:27:26,257 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/c9be41090e8a4b35a14da4141e2f0a3b to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/c9be41090e8a4b35a14da4141e2f0a3b 2024-12-11T04:27:26,257 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/91bac4abd2184787864973db22104696 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/91bac4abd2184787864973db22104696 2024-12-11T04:27:26,257 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/c137075c24ae4ec59ba091ad54c4d444 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/c137075c24ae4ec59ba091ad54c4d444 2024-12-11T04:27:26,262 DEBUG [StoreCloser-TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/a1522fa48c484388aeae940e234e3686, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/078a1c850af34b85b5752a6a3ac4adee, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/3c1c6bcf3c1245cfb11742d0560a28ab, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/5f6fa41e8d93487ba38ce9b3ceafb846, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/092ca351e067450f89dcf623dbb93f2a, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/53104bda3cb44bb192909fb999c2a2db, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/5387b468400b4091b50b63d58edce9f5, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/1c796dd712a94fcf8599d73737c7a464, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/7075cfeae58a49a7a8b1bb23cda171f7, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/09d08f1128e34764b37405d10f895c46, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/9763c4c0977c4debaa9bb043fc73f005, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/56279eb499d6407b86cb4059082d6634, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/539ebc8558c74a2db98c9dd0d529cfaa, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/a83cddd1dbb4413a9f6614e490b82972, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/6b3d083986f1431f91d1ca3278fbd6d1, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/bb35918bf69746b5ac84c4d529599c70, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/00fc3950340a46b88172189f58ef9940, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/604a413f81364431af096438060cf63f, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/faa076d95f1b49ceb02adc9c7ee44c6f, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/06179e1773bb4701b0c4209a833466aa, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/d54c45f05e4d4802882ea6e45609b9a3, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/83024c6eb95141f5ae927034bb6b297c, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/600ca7ecdef7428d836248b8326befb1, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/9b1cd38bb7274b39876cdee01be0a5c6, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/0b6a77d542964accad9963f916187ad1, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/256d162195724fae9847cb51451df5ae] to archive 2024-12-11T04:27:26,263 DEBUG [StoreCloser-TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-11T04:27:26,265 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/3c1c6bcf3c1245cfb11742d0560a28ab to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/3c1c6bcf3c1245cfb11742d0560a28ab 2024-12-11T04:27:26,266 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/a1522fa48c484388aeae940e234e3686 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/a1522fa48c484388aeae940e234e3686 2024-12-11T04:27:26,266 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/078a1c850af34b85b5752a6a3ac4adee to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/078a1c850af34b85b5752a6a3ac4adee 2024-12-11T04:27:26,266 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/53104bda3cb44bb192909fb999c2a2db to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/53104bda3cb44bb192909fb999c2a2db 2024-12-11T04:27:26,267 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/5f6fa41e8d93487ba38ce9b3ceafb846 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/5f6fa41e8d93487ba38ce9b3ceafb846 2024-12-11T04:27:26,267 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/092ca351e067450f89dcf623dbb93f2a to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/092ca351e067450f89dcf623dbb93f2a 2024-12-11T04:27:26,267 DEBUG [HFileArchiver-13 {}] 
backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/1c796dd712a94fcf8599d73737c7a464 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/1c796dd712a94fcf8599d73737c7a464 2024-12-11T04:27:26,267 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/5387b468400b4091b50b63d58edce9f5 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/5387b468400b4091b50b63d58edce9f5 2024-12-11T04:27:26,268 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/7075cfeae58a49a7a8b1bb23cda171f7 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/7075cfeae58a49a7a8b1bb23cda171f7 2024-12-11T04:27:26,269 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/09d08f1128e34764b37405d10f895c46 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/09d08f1128e34764b37405d10f895c46 2024-12-11T04:27:26,269 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/539ebc8558c74a2db98c9dd0d529cfaa to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/539ebc8558c74a2db98c9dd0d529cfaa 2024-12-11T04:27:26,269 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/56279eb499d6407b86cb4059082d6634 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/56279eb499d6407b86cb4059082d6634 2024-12-11T04:27:26,269 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/9763c4c0977c4debaa9bb043fc73f005 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/9763c4c0977c4debaa9bb043fc73f005 2024-12-11T04:27:26,269 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/a83cddd1dbb4413a9f6614e490b82972 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/a83cddd1dbb4413a9f6614e490b82972 2024-12-11T04:27:26,270 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/6b3d083986f1431f91d1ca3278fbd6d1 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/6b3d083986f1431f91d1ca3278fbd6d1 2024-12-11T04:27:26,270 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/bb35918bf69746b5ac84c4d529599c70 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/bb35918bf69746b5ac84c4d529599c70 2024-12-11T04:27:26,271 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/00fc3950340a46b88172189f58ef9940 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/00fc3950340a46b88172189f58ef9940 2024-12-11T04:27:26,271 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/604a413f81364431af096438060cf63f to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/604a413f81364431af096438060cf63f 2024-12-11T04:27:26,272 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/d54c45f05e4d4802882ea6e45609b9a3 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/d54c45f05e4d4802882ea6e45609b9a3 2024-12-11T04:27:26,272 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/faa076d95f1b49ceb02adc9c7ee44c6f to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/faa076d95f1b49ceb02adc9c7ee44c6f 2024-12-11T04:27:26,272 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/06179e1773bb4701b0c4209a833466aa to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/06179e1773bb4701b0c4209a833466aa 2024-12-11T04:27:26,272 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/83024c6eb95141f5ae927034bb6b297c to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/83024c6eb95141f5ae927034bb6b297c 2024-12-11T04:27:26,272 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/600ca7ecdef7428d836248b8326befb1 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/600ca7ecdef7428d836248b8326befb1 2024-12-11T04:27:26,272 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/0b6a77d542964accad9963f916187ad1 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/0b6a77d542964accad9963f916187ad1 2024-12-11T04:27:26,272 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/9b1cd38bb7274b39876cdee01be0a5c6 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/9b1cd38bb7274b39876cdee01be0a5c6 2024-12-11T04:27:26,273 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/256d162195724fae9847cb51451df5ae to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/256d162195724fae9847cb51451df5ae 2024-12-11T04:27:26,277 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/recovered.edits/433.seqid, newMaxSeqId=433, maxSeqId=4 2024-12-11T04:27:26,277 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e. 
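The HFileArchiver lines above show each compacted store file being moved from the region's data directory to the matching path under .../archive/data/default/TestAcidGuarantees/... before the region is closed, preserving the table/region/family layout. A minimal sketch of how that archived layout could be inspected afterwards with the Hadoop FileSystem API; the NameNode address and paths are copied from the log, while the class and variable names are illustrative assumptions, not part of the test:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class ListArchivedStoreFiles {
  public static void main(String[] args) throws Exception {
    // NameNode URI and archive directory as they appear in the log lines above.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:43317"), new Configuration());
    Path archivedRegion = new Path(
        "/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/"
            + "TestAcidGuarantees/0d95822809793edddcee8d4c8425775e");
    // Recursively list the archived HFiles; family subdirectories (A, B, C) are kept intact.
    RemoteIterator<LocatedFileStatus> it = fs.listFiles(archivedRegion, true);
    while (it.hasNext()) {
      LocatedFileStatus f = it.next();
      System.out.println(f.getPath() + "\t" + f.getLen() + " bytes");
    }
    fs.close();
  }
}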
2024-12-11T04:27:26,277 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] regionserver.HRegion(1635): Region close journal for 0d95822809793edddcee8d4c8425775e: 2024-12-11T04:27:26,278 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=70}] handler.UnassignRegionHandler(170): Closed 0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:26,279 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=69 updating hbase:meta row=0d95822809793edddcee8d4c8425775e, regionState=CLOSED 2024-12-11T04:27:26,280 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=69 2024-12-11T04:27:26,280 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=69, state=SUCCESS; CloseRegionProcedure 0d95822809793edddcee8d4c8425775e, server=5f466b3719ec,39071,1733891180267 in 1.8570 sec 2024-12-11T04:27:26,281 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=69, resume processing ppid=68 2024-12-11T04:27:26,282 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, ppid=68, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=0d95822809793edddcee8d4c8425775e, UNASSIGN in 1.8600 sec 2024-12-11T04:27:26,283 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=68, resume processing ppid=67 2024-12-11T04:27:26,283 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, ppid=67, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8630 sec 2024-12-11T04:27:26,284 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733891246284"}]},"ts":"1733891246284"} 2024-12-11T04:27:26,284 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-11T04:27:26,286 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-11T04:27:26,287 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.8750 sec 2024-12-11T04:27:26,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-11T04:27:26,518 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 67 completed 2024-12-11T04:27:26,519 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-11T04:27:26,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T04:27:26,520 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=71, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T04:27:26,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-11T04:27:26,521 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for 
pid=71, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T04:27:26,522 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:26,525 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A, FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B, FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C, FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/recovered.edits] 2024-12-11T04:27:26,528 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/009eba850ea84ae187a9e7cbea23557c to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/009eba850ea84ae187a9e7cbea23557c 2024-12-11T04:27:26,528 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/3e52e028a1e44e4680040c598873e0e6 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/3e52e028a1e44e4680040c598873e0e6 2024-12-11T04:27:26,528 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/f62d8002464d4a9589999b1fed0686b1 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/f62d8002464d4a9589999b1fed0686b1 2024-12-11T04:27:26,528 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/b13e53d58dc64dae92aa9c4a4cf536cc to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/A/b13e53d58dc64dae92aa9c4a4cf536cc 2024-12-11T04:27:26,532 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/8a24b162b7a644018dc12f95500758aa to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/8a24b162b7a644018dc12f95500758aa 
2024-12-11T04:27:26,532 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/8bfc95fa3b4949f7b84a75d7d322c438 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/8bfc95fa3b4949f7b84a75d7d322c438 2024-12-11T04:27:26,532 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/ffc507ea11414c8fbe7f91029d7a45a0 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/ffc507ea11414c8fbe7f91029d7a45a0 2024-12-11T04:27:26,532 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/da696e99a8574da3bed9bb1aea3af572 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/B/da696e99a8574da3bed9bb1aea3af572 2024-12-11T04:27:26,536 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/037d28ff1407416faf4a047464d6119a to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/037d28ff1407416faf4a047464d6119a 2024-12-11T04:27:26,536 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/2331105b41954459a3647acb4a86a9d6 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/2331105b41954459a3647acb4a86a9d6 2024-12-11T04:27:26,536 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/d66f4a155e034ce2b9aca6a67a1b50f6 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/d66f4a155e034ce2b9aca6a67a1b50f6 2024-12-11T04:27:26,536 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/bb751d8537e8405993ae1738b465c16b to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/C/bb751d8537e8405993ae1738b465c16b 2024-12-11T04:27:26,539 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/recovered.edits/433.seqid to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e/recovered.edits/433.seqid 2024-12-11T04:27:26,539 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:26,539 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-11T04:27:26,540 DEBUG [PEWorker-3 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-11T04:27:26,541 DEBUG [PEWorker-3 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-11T04:27:26,550 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412110f228428b5164fb6a959fbc51a40da10_0d95822809793edddcee8d4c8425775e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412110f228428b5164fb6a959fbc51a40da10_0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:26,551 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121117bd3d8a0e174d80b3aebd815a7bd8e3_0d95822809793edddcee8d4c8425775e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121117bd3d8a0e174d80b3aebd815a7bd8e3_0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:26,551 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412110f623bf96455498aaae329355da2a3ae_0d95822809793edddcee8d4c8425775e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412110f623bf96455498aaae329355da2a3ae_0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:26,551 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211019dcc6b9cad464db788b57c69f2db76_0d95822809793edddcee8d4c8425775e to 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211019dcc6b9cad464db788b57c69f2db76_0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:26,551 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211339014bd40a546099e15f24825ccbb71_0d95822809793edddcee8d4c8425775e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211339014bd40a546099e15f24825ccbb71_0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:26,551 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121124ac81f825734ad18b06f760957dc8b4_0d95822809793edddcee8d4c8425775e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121124ac81f825734ad18b06f760957dc8b4_0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:26,551 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121124374714dad44b2e9dc8612c79b36c91_0d95822809793edddcee8d4c8425775e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121124374714dad44b2e9dc8612c79b36c91_0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:26,551 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412114107cb558c8b4611b0c425d54ee5818b_0d95822809793edddcee8d4c8425775e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412114107cb558c8b4611b0c425d54ee5818b_0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:26,553 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211436ab78aed0a413e92a2419149231212_0d95822809793edddcee8d4c8425775e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211436ab78aed0a413e92a2419149231212_0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:26,553 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412115e9d0d32eb8d4e53b85ac7a64a92474d_0d95822809793edddcee8d4c8425775e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412115e9d0d32eb8d4e53b85ac7a64a92474d_0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:26,553 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412117551996d473f4837b99e5c518ef12157_0d95822809793edddcee8d4c8425775e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412117551996d473f4837b99e5c518ef12157_0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:26,553 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121176549f4ea16c49168b3f09f6ace43bfd_0d95822809793edddcee8d4c8425775e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121176549f4ea16c49168b3f09f6ace43bfd_0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:26,553 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412115e34b8091dab481fb00c4d7603d02ee7_0d95822809793edddcee8d4c8425775e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412115e34b8091dab481fb00c4d7603d02ee7_0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:26,554 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211691e60cfdbac46dfb838ae690aa0c3cd_0d95822809793edddcee8d4c8425775e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211691e60cfdbac46dfb838ae690aa0c3cd_0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:26,554 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211641a00a91cda49ac95d3036111aaef9a_0d95822809793edddcee8d4c8425775e to 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211641a00a91cda49ac95d3036111aaef9a_0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:26,554 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412117784dafe06a6450fb6eb4ff440b17e67_0d95822809793edddcee8d4c8425775e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412117784dafe06a6450fb6eb4ff440b17e67_0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:26,555 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121186c99547f8b3402396c908e70fb2d6c0_0d95822809793edddcee8d4c8425775e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121186c99547f8b3402396c908e70fb2d6c0_0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:26,556 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412119a41e4bec25644c78c4333a94a430bb0_0d95822809793edddcee8d4c8425775e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412119a41e4bec25644c78c4333a94a430bb0_0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:26,556 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412117ae0b9d9a0354386ab8aa73409d3d1ff_0d95822809793edddcee8d4c8425775e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412117ae0b9d9a0354386ab8aa73409d3d1ff_0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:26,556 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211f2acef871c17416b88e35319e9db1f13_0d95822809793edddcee8d4c8425775e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211f2acef871c17416b88e35319e9db1f13_0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:26,556 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211c91effe2344244bda9f59b305b4ba479_0d95822809793edddcee8d4c8425775e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211c91effe2344244bda9f59b305b4ba479_0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:26,556 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211eddf46bd0b32452da874f3e2c51106a2_0d95822809793edddcee8d4c8425775e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211eddf46bd0b32452da874f3e2c51106a2_0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:26,556 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211ca6d2946b9d14c56990dcdb5390348d2_0d95822809793edddcee8d4c8425775e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211ca6d2946b9d14c56990dcdb5390348d2_0d95822809793edddcee8d4c8425775e 2024-12-11T04:27:26,557 DEBUG [PEWorker-3 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-11T04:27:26,559 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=71, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T04:27:26,561 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-11T04:27:26,563 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-11T04:27:26,564 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=71, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T04:27:26,564 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 
2024-12-11T04:27:26,564 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733891246564"}]},"ts":"9223372036854775807"} 2024-12-11T04:27:26,566 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-11T04:27:26,566 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 0d95822809793edddcee8d4c8425775e, NAME => 'TestAcidGuarantees,,1733891212123.0d95822809793edddcee8d4c8425775e.', STARTKEY => '', ENDKEY => ''}] 2024-12-11T04:27:26,566 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 2024-12-11T04:27:26,566 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733891246566"}]},"ts":"9223372036854775807"} 2024-12-11T04:27:26,567 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-11T04:27:26,570 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=71, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T04:27:26,570 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 51 msec 2024-12-11T04:27:26,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-11T04:27:26,622 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 71 completed 2024-12-11T04:27:26,634 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=244 (was 246), OpenFileDescriptor=451 (was 454), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=353 (was 312) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3601 (was 3737) 2024-12-11T04:27:26,646 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=244, OpenFileDescriptor=451, MaxFileDescriptor=1048576, SystemLoadAverage=353, ProcessCount=11, AvailableMemoryMB=3600 2024-12-11T04:27:26,648 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-12-11T04:27:26,648 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T04:27:26,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=72, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-11T04:27:26,650 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=72, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T04:27:26,650 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:26,650 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 72 2024-12-11T04:27:26,651 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=72, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T04:27:26,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=72 2024-12-11T04:27:26,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742100_1276 (size=963) 2024-12-11T04:27:26,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=72 2024-12-11T04:27:26,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=72 2024-12-11T04:27:27,059 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5 2024-12-11T04:27:27,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742101_1277 (size=53) 2024-12-11T04:27:27,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=72 2024-12-11T04:27:27,465 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T04:27:27,465 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing d07fdbcaf0fe943be9e071492694f078, disabling compactions & flushes 2024-12-11T04:27:27,465 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:27,465 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:27,465 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. after waiting 0 ms 2024-12-11T04:27:27,465 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:27,465 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 
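Procedures 67 (DisableTableProcedure), 71 (DeleteTableProcedure) and 72 (CreateTableProcedure) above correspond to the harness dropping TestAcidGuarantees after testMobMixedAtomicity and recreating it for testGetAtomicity with families A, B and C, one version per cell, and the table attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'. A rough client-side equivalent using the HBase 2.x Admin API, offered as a sketch rather than the test's actual code; the class name and the assumption that hbase-site.xml for this mini cluster is on the classpath are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class RecreateTestAcidGuarantees {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // picks up the cluster's hbase-site.xml
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      if (admin.tableExists(table)) {
        admin.disableTable(table); // returns once the DisableTableProcedure finishes
        admin.deleteTable(table);  // returns once the DeleteTableProcedure finishes
      }
      TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(table)
          // Table attribute seen in the logged create statement; selects the
          // adaptive in-memory compaction policy for the CompactingMemStore.
          .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
      for (String family : new String[] {"A", "B", "C"}) {
        builder.setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes(family))
            .setMaxVersions(1) // VERSIONS => '1' in the logged descriptor
            .build());
      }
      admin.createTable(builder.build()); // CreateTableProcedure, then region assignment
    }
  }
}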
2024-12-11T04:27:27,465 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for d07fdbcaf0fe943be9e071492694f078: 2024-12-11T04:27:27,466 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=72, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T04:27:27,467 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733891247466"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733891247466"}]},"ts":"1733891247466"} 2024-12-11T04:27:27,468 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-11T04:27:27,468 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=72, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T04:27:27,468 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733891247468"}]},"ts":"1733891247468"} 2024-12-11T04:27:27,469 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-11T04:27:27,473 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=73, ppid=72, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d07fdbcaf0fe943be9e071492694f078, ASSIGN}] 2024-12-11T04:27:27,473 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=73, ppid=72, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d07fdbcaf0fe943be9e071492694f078, ASSIGN 2024-12-11T04:27:27,474 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=73, ppid=72, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=d07fdbcaf0fe943be9e071492694f078, ASSIGN; state=OFFLINE, location=5f466b3719ec,39071,1733891180267; forceNewPlan=false, retain=false 2024-12-11T04:27:27,624 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=73 updating hbase:meta row=d07fdbcaf0fe943be9e071492694f078, regionState=OPENING, regionLocation=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:27,625 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=74, ppid=73, state=RUNNABLE; OpenRegionProcedure d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267}] 2024-12-11T04:27:27,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=72 2024-12-11T04:27:27,777 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:27,780 INFO [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 
2024-12-11T04:27:27,780 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(7285): Opening region: {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} 2024-12-11T04:27:27,780 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees d07fdbcaf0fe943be9e071492694f078 2024-12-11T04:27:27,780 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T04:27:27,781 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(7327): checking encryption for d07fdbcaf0fe943be9e071492694f078 2024-12-11T04:27:27,781 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(7330): checking classloading for d07fdbcaf0fe943be9e071492694f078 2024-12-11T04:27:27,782 INFO [StoreOpener-d07fdbcaf0fe943be9e071492694f078-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region d07fdbcaf0fe943be9e071492694f078 2024-12-11T04:27:27,783 INFO [StoreOpener-d07fdbcaf0fe943be9e071492694f078-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T04:27:27,783 INFO [StoreOpener-d07fdbcaf0fe943be9e071492694f078-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d07fdbcaf0fe943be9e071492694f078 columnFamilyName A 2024-12-11T04:27:27,783 DEBUG [StoreOpener-d07fdbcaf0fe943be9e071492694f078-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:27,784 INFO [StoreOpener-d07fdbcaf0fe943be9e071492694f078-1 {}] regionserver.HStore(327): Store=d07fdbcaf0fe943be9e071492694f078/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T04:27:27,784 INFO [StoreOpener-d07fdbcaf0fe943be9e071492694f078-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region d07fdbcaf0fe943be9e071492694f078 2024-12-11T04:27:27,785 INFO [StoreOpener-d07fdbcaf0fe943be9e071492694f078-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T04:27:27,785 INFO [StoreOpener-d07fdbcaf0fe943be9e071492694f078-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d07fdbcaf0fe943be9e071492694f078 columnFamilyName B 2024-12-11T04:27:27,785 DEBUG [StoreOpener-d07fdbcaf0fe943be9e071492694f078-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:27,786 INFO [StoreOpener-d07fdbcaf0fe943be9e071492694f078-1 {}] regionserver.HStore(327): Store=d07fdbcaf0fe943be9e071492694f078/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T04:27:27,786 INFO [StoreOpener-d07fdbcaf0fe943be9e071492694f078-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region d07fdbcaf0fe943be9e071492694f078 2024-12-11T04:27:27,786 INFO [StoreOpener-d07fdbcaf0fe943be9e071492694f078-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T04:27:27,787 INFO [StoreOpener-d07fdbcaf0fe943be9e071492694f078-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d07fdbcaf0fe943be9e071492694f078 columnFamilyName C 2024-12-11T04:27:27,787 DEBUG [StoreOpener-d07fdbcaf0fe943be9e071492694f078-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:27,787 INFO [StoreOpener-d07fdbcaf0fe943be9e071492694f078-1 {}] regionserver.HStore(327): Store=d07fdbcaf0fe943be9e071492694f078/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T04:27:27,787 INFO [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:27,788 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078 2024-12-11T04:27:27,788 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078 2024-12-11T04:27:27,789 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-11T04:27:27,790 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(1085): writing seq id for d07fdbcaf0fe943be9e071492694f078 2024-12-11T04:27:27,792 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T04:27:27,792 INFO [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(1102): Opened d07fdbcaf0fe943be9e071492694f078; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69921802, jitterRate=0.04191604256629944}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-11T04:27:27,793 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegion(1001): Region open journal for d07fdbcaf0fe943be9e071492694f078: 2024-12-11T04:27:27,793 INFO [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078., pid=74, masterSystemTime=1733891247777 2024-12-11T04:27:27,794 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:27,795 INFO [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=74}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 
2024-12-11T04:27:27,795 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=73 updating hbase:meta row=d07fdbcaf0fe943be9e071492694f078, regionState=OPEN, openSeqNum=2, regionLocation=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:27,797 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=74, resume processing ppid=73 2024-12-11T04:27:27,797 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, ppid=73, state=SUCCESS; OpenRegionProcedure d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 in 171 msec 2024-12-11T04:27:27,798 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=73, resume processing ppid=72 2024-12-11T04:27:27,798 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, ppid=72, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=d07fdbcaf0fe943be9e071492694f078, ASSIGN in 324 msec 2024-12-11T04:27:27,798 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=72, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T04:27:27,799 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733891247798"}]},"ts":"1733891247798"} 2024-12-11T04:27:27,799 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-11T04:27:27,802 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=72, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T04:27:27,803 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1540 sec 2024-12-11T04:27:28,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=72 2024-12-11T04:27:28,755 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 72 completed 2024-12-11T04:27:28,756 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7c480dfb to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@683b64c3 2024-12-11T04:27:28,762 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ec09297, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:27:28,763 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:27:28,764 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53872, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:27:28,765 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-11T04:27:28,766 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43380, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-11T04:27:28,768 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x34cb3991 to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7e55eb7 2024-12-11T04:27:28,772 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4dfb20f6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:27:28,773 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2e9ae050 to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3a703d2 2024-12-11T04:27:28,777 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17cf7fc0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:27:28,778 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2fef31f8 to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@14ed1e44 2024-12-11T04:27:28,781 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78b04266, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:27:28,783 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0eb04aeb to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72537a47 2024-12-11T04:27:28,786 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@88aa519, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:27:28,787 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6a0e9c8f to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@36642cb 2024-12-11T04:27:28,791 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e998dd3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:27:28,792 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c299cfb to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2e4c79b8 2024-12-11T04:27:28,795 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a78bf6d, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:27:28,797 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x605827c9 to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2d1403c3 2024-12-11T04:27:28,800 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@328852db, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:27:28,801 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3677bd4f to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3bf0ba59 2024-12-11T04:27:28,803 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b9e2976, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:27:28,804 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x521aad6f to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6c86f707 2024-12-11T04:27:28,807 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@56e9a678, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:27:28,808 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6f5b2180 to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@34becda3 2024-12-11T04:27:28,810 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f7f772a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:27:28,813 DEBUG [hconnection-0x4c60562e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:27:28,814 DEBUG [hconnection-0x487dad1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:27:28,814 DEBUG [hconnection-0x34b0996d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:27:28,815 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53886, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:27:28,815 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53898, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:27:28,815 INFO [RS-EventLoopGroup-3-3 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53896, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:27:28,815 DEBUG [hconnection-0x3a6acb0d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:27:28,816 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53904, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:27:28,816 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:27:28,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees 2024-12-11T04:27:28,817 DEBUG [hconnection-0x682de5fc-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:27:28,817 DEBUG [hconnection-0xa08aa1a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:27:28,818 DEBUG [hconnection-0x73987bde-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:27:28,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-11T04:27:28,819 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53906, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:27:28,819 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53912, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:27:28,819 DEBUG [hconnection-0x644d5aa-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:27:28,819 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53926, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:27:28,820 DEBUG [hconnection-0x72b30896-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:27:28,820 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53936, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:27:28,820 DEBUG [hconnection-0x5434e681-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:27:28,821 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53942, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:27:28,822 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:27:28,823 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): 
Connection from 172.17.0.2:53952, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:27:28,823 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:27:28,823 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:27:28,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on d07fdbcaf0fe943be9e071492694f078 2024-12-11T04:27:28,828 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d07fdbcaf0fe943be9e071492694f078 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-11T04:27:28,830 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=A 2024-12-11T04:27:28,830 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:28,830 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=B 2024-12-11T04:27:28,830 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:28,830 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=C 2024-12-11T04:27:28,830 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:28,850 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:28,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891308846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:28,852 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:28,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891308847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:28,853 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:28,853 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:28,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891308848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:28,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891308848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:28,853 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:28,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891308849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:28,875 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/d751ee1a0f2b4d07bf1758e334756765 is 50, key is test_row_0/A:col10/1733891248828/Put/seqid=0 2024-12-11T04:27:28,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742102_1278 (size=12001) 2024-12-11T04:27:28,901 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/d751ee1a0f2b4d07bf1758e334756765 2024-12-11T04:27:28,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-11T04:27:28,936 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/9ffae03e8f0b439a98e6e278fa53714f is 50, key is test_row_0/B:col10/1733891248828/Put/seqid=0 2024-12-11T04:27:28,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742103_1279 (size=12001) 2024-12-11T04:27:28,943 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), 
to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/9ffae03e8f0b439a98e6e278fa53714f 2024-12-11T04:27:28,953 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:28,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891308953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:28,956 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:28,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891308954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:28,956 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:28,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891308954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:28,957 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:28,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891308956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:28,957 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:28,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891308956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:28,976 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:28,977 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-11T04:27:28,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:28,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. as already flushing 2024-12-11T04:27:28,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:28,977 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:27:28,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:28,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:28,979 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/6556c5529e564dd18437b0fdcde7b7b4 is 50, key is test_row_0/C:col10/1733891248828/Put/seqid=0 2024-12-11T04:27:28,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742104_1280 (size=12001) 2024-12-11T04:27:29,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-11T04:27:29,130 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:29,130 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-11T04:27:29,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:29,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. as already flushing 2024-12-11T04:27:29,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:29,131 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:27:29,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:29,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:29,156 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:29,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891309155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:29,158 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:29,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891309157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:29,159 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:29,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891309158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:29,160 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:29,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891309159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:29,160 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:29,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891309159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:29,287 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:29,287 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-11T04:27:29,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:29,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. as already flushing 2024-12-11T04:27:29,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:29,288 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:27:29,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:29,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:29,387 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/6556c5529e564dd18437b0fdcde7b7b4 2024-12-11T04:27:29,393 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/d751ee1a0f2b4d07bf1758e334756765 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/d751ee1a0f2b4d07bf1758e334756765 2024-12-11T04:27:29,397 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/d751ee1a0f2b4d07bf1758e334756765, entries=150, sequenceid=14, filesize=11.7 K 2024-12-11T04:27:29,398 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/9ffae03e8f0b439a98e6e278fa53714f as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/9ffae03e8f0b439a98e6e278fa53714f 2024-12-11T04:27:29,402 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/9ffae03e8f0b439a98e6e278fa53714f, entries=150, sequenceid=14, filesize=11.7 K 2024-12-11T04:27:29,403 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/6556c5529e564dd18437b0fdcde7b7b4 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/6556c5529e564dd18437b0fdcde7b7b4 2024-12-11T04:27:29,407 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/6556c5529e564dd18437b0fdcde7b7b4, entries=150, sequenceid=14, filesize=11.7 K 2024-12-11T04:27:29,408 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for d07fdbcaf0fe943be9e071492694f078 in 580ms, sequenceid=14, compaction requested=false 2024-12-11T04:27:29,409 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d07fdbcaf0fe943be9e071492694f078: 2024-12-11T04:27:29,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=75 2024-12-11T04:27:29,440 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:29,440 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-11T04:27:29,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:29,441 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2837): Flushing d07fdbcaf0fe943be9e071492694f078 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-11T04:27:29,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=A 2024-12-11T04:27:29,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:29,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=B 2024-12-11T04:27:29,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:29,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=C 2024-12-11T04:27:29,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:29,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/b7c85666f7744a6b9aa2af31f984ad92 is 50, key is test_row_0/A:col10/1733891248840/Put/seqid=0 2024-12-11T04:27:29,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on d07fdbcaf0fe943be9e071492694f078 2024-12-11T04:27:29,461 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. as already flushing 2024-12-11T04:27:29,472 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:29,472 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:29,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891309469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:29,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891309468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:29,473 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:29,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891309470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:29,475 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:29,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891309471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:29,476 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:29,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891309473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:29,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742105_1281 (size=12001) 2024-12-11T04:27:29,482 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/b7c85666f7744a6b9aa2af31f984ad92 2024-12-11T04:27:29,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/ce274a16e02249c099d726a6b2b727ed is 50, key is test_row_0/B:col10/1733891248840/Put/seqid=0 2024-12-11T04:27:29,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742106_1282 (size=12001) 2024-12-11T04:27:29,575 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:29,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891309574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:29,575 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:29,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891309574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:29,576 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:29,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891309574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:29,577 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:29,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891309576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:29,578 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:29,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891309577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:29,776 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:29,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891309776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:29,777 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:29,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891309776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:29,778 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:29,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891309777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:29,779 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:29,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891309778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:29,780 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:29,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891309779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:29,918 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/ce274a16e02249c099d726a6b2b727ed 2024-12-11T04:27:29,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-11T04:27:29,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/ef73f29393544c2bb3e1e24fec352215 is 50, key is test_row_0/C:col10/1733891248840/Put/seqid=0 2024-12-11T04:27:29,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742107_1283 (size=12001) 2024-12-11T04:27:30,080 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:30,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891310079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:30,080 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:30,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891310080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:30,081 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:30,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891310080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:30,082 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:30,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891310081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:30,082 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:30,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891310082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:30,347 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/ef73f29393544c2bb3e1e24fec352215 2024-12-11T04:27:30,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/b7c85666f7744a6b9aa2af31f984ad92 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/b7c85666f7744a6b9aa2af31f984ad92 2024-12-11T04:27:30,357 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/b7c85666f7744a6b9aa2af31f984ad92, entries=150, sequenceid=37, filesize=11.7 K 2024-12-11T04:27:30,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/ce274a16e02249c099d726a6b2b727ed as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/ce274a16e02249c099d726a6b2b727ed 2024-12-11T04:27:30,363 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/ce274a16e02249c099d726a6b2b727ed, entries=150, sequenceid=37, filesize=11.7 K 2024-12-11T04:27:30,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/ef73f29393544c2bb3e1e24fec352215 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/ef73f29393544c2bb3e1e24fec352215 2024-12-11T04:27:30,373 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/ef73f29393544c2bb3e1e24fec352215, entries=150, sequenceid=37, filesize=11.7 K 2024-12-11T04:27:30,374 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for d07fdbcaf0fe943be9e071492694f078 in 933ms, sequenceid=37, compaction requested=false 2024-12-11T04:27:30,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2538): Flush status journal for d07fdbcaf0fe943be9e071492694f078: 2024-12-11T04:27:30,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:30,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=76 2024-12-11T04:27:30,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=76 2024-12-11T04:27:30,377 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-12-11T04:27:30,378 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5530 sec 2024-12-11T04:27:30,379 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees in 1.5620 sec 2024-12-11T04:27:30,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on d07fdbcaf0fe943be9e071492694f078 2024-12-11T04:27:30,585 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d07fdbcaf0fe943be9e071492694f078 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-11T04:27:30,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=A 2024-12-11T04:27:30,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:30,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=B 2024-12-11T04:27:30,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:30,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
d07fdbcaf0fe943be9e071492694f078, store=C 2024-12-11T04:27:30,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:30,591 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/f529e8ddda754fe886000789e59e4715 is 50, key is test_row_0/A:col10/1733891250583/Put/seqid=0 2024-12-11T04:27:30,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742108_1284 (size=12001) 2024-12-11T04:27:30,600 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:30,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891310597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:30,602 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:30,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891310600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:30,603 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:30,603 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:30,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891310600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:30,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891310600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:30,603 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:30,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891310601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:30,702 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:30,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891310701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:30,705 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:30,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891310703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:30,705 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:30,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891310704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:30,705 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:30,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891310704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:30,706 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:30,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891310704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:30,906 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:30,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891310904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:30,907 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:30,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891310906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:30,908 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:30,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891310906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:30,908 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:30,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891310907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:30,908 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:30,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891310907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:30,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-11T04:27:30,923 INFO [Thread-1267 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 75 completed 2024-12-11T04:27:30,925 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:27:30,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees 2024-12-11T04:27:30,926 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:27:30,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-11T04:27:30,927 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:27:30,927 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:27:30,996 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/f529e8ddda754fe886000789e59e4715 2024-12-11T04:27:31,010 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/bced0a4717394a3887902beb61e12a2b is 50, key is test_row_0/B:col10/1733891250583/Put/seqid=0 2024-12-11T04:27:31,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742109_1285 (size=12001) 
2024-12-11T04:27:31,020 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/bced0a4717394a3887902beb61e12a2b 2024-12-11T04:27:31,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-11T04:27:31,028 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/812cdc95a4eb4cfc8eb3e31841cab666 is 50, key is test_row_0/C:col10/1733891250583/Put/seqid=0 2024-12-11T04:27:31,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742110_1286 (size=12001) 2024-12-11T04:27:31,034 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/812cdc95a4eb4cfc8eb3e31841cab666 2024-12-11T04:27:31,040 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/f529e8ddda754fe886000789e59e4715 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/f529e8ddda754fe886000789e59e4715 2024-12-11T04:27:31,049 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/f529e8ddda754fe886000789e59e4715, entries=150, sequenceid=54, filesize=11.7 K 2024-12-11T04:27:31,051 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/bced0a4717394a3887902beb61e12a2b as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/bced0a4717394a3887902beb61e12a2b 2024-12-11T04:27:31,055 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/bced0a4717394a3887902beb61e12a2b, entries=150, sequenceid=54, filesize=11.7 K 2024-12-11T04:27:31,056 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/812cdc95a4eb4cfc8eb3e31841cab666 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/812cdc95a4eb4cfc8eb3e31841cab666 2024-12-11T04:27:31,060 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/812cdc95a4eb4cfc8eb3e31841cab666, entries=150, sequenceid=54, filesize=11.7 K 2024-12-11T04:27:31,062 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for d07fdbcaf0fe943be9e071492694f078 in 476ms, sequenceid=54, compaction requested=true 2024-12-11T04:27:31,062 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d07fdbcaf0fe943be9e071492694f078: 2024-12-11T04:27:31,062 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d07fdbcaf0fe943be9e071492694f078:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:27:31,062 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:31,062 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:27:31,062 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d07fdbcaf0fe943be9e071492694f078:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:27:31,062 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:31,062 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:27:31,062 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d07fdbcaf0fe943be9e071492694f078:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:27:31,062 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:27:31,063 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:27:31,063 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): d07fdbcaf0fe943be9e071492694f078/A is initiating minor compaction (all files) 2024-12-11T04:27:31,063 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d07fdbcaf0fe943be9e071492694f078/A in TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 
2024-12-11T04:27:31,064 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/d751ee1a0f2b4d07bf1758e334756765, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/b7c85666f7744a6b9aa2af31f984ad92, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/f529e8ddda754fe886000789e59e4715] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp, totalSize=35.2 K 2024-12-11T04:27:31,064 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:27:31,064 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): d07fdbcaf0fe943be9e071492694f078/B is initiating minor compaction (all files) 2024-12-11T04:27:31,064 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d07fdbcaf0fe943be9e071492694f078/B in TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:31,064 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/9ffae03e8f0b439a98e6e278fa53714f, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/ce274a16e02249c099d726a6b2b727ed, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/bced0a4717394a3887902beb61e12a2b] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp, totalSize=35.2 K 2024-12-11T04:27:31,064 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting d751ee1a0f2b4d07bf1758e334756765, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1733891248826 2024-12-11T04:27:31,065 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 9ffae03e8f0b439a98e6e278fa53714f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1733891248826 2024-12-11T04:27:31,065 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting b7c85666f7744a6b9aa2af31f984ad92, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733891248840 2024-12-11T04:27:31,065 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting ce274a16e02249c099d726a6b2b727ed, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733891248840 2024-12-11T04:27:31,065 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting f529e8ddda754fe886000789e59e4715, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733891250583 2024-12-11T04:27:31,066 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting bced0a4717394a3887902beb61e12a2b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733891250583 2024-12-11T04:27:31,079 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:31,079 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-11T04:27:31,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:31,080 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2837): Flushing d07fdbcaf0fe943be9e071492694f078 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-11T04:27:31,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=A 2024-12-11T04:27:31,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:31,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=B 2024-12-11T04:27:31,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:31,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=C 2024-12-11T04:27:31,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:31,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/c7f66a38e0d547bda7e192b29e5523ed is 50, key is test_row_0/A:col10/1733891250598/Put/seqid=0 2024-12-11T04:27:31,098 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d07fdbcaf0fe943be9e071492694f078#A#compaction#238 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:27:31,099 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/de59b864c537416bb52af9983fdbc6bf is 50, key is test_row_0/A:col10/1733891250583/Put/seqid=0 2024-12-11T04:27:31,114 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d07fdbcaf0fe943be9e071492694f078#B#compaction#239 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:27:31,114 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/eef115c015274204acce84bc26e5a3ed is 50, key is test_row_0/B:col10/1733891250583/Put/seqid=0 2024-12-11T04:27:31,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742111_1287 (size=12001) 2024-12-11T04:27:31,154 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/c7f66a38e0d547bda7e192b29e5523ed 2024-12-11T04:27:31,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742112_1288 (size=12104) 2024-12-11T04:27:31,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742113_1289 (size=12104) 2024-12-11T04:27:31,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/76f3a91de3ff4155bd728e34a8602195 is 50, key is test_row_0/B:col10/1733891250598/Put/seqid=0 2024-12-11T04:27:31,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742114_1290 (size=12001) 2024-12-11T04:27:31,177 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/de59b864c537416bb52af9983fdbc6bf as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/de59b864c537416bb52af9983fdbc6bf 2024-12-11T04:27:31,178 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=74 (bloomFilter=true), 
to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/76f3a91de3ff4155bd728e34a8602195 2024-12-11T04:27:31,184 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d07fdbcaf0fe943be9e071492694f078/A of d07fdbcaf0fe943be9e071492694f078 into de59b864c537416bb52af9983fdbc6bf(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:27:31,184 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d07fdbcaf0fe943be9e071492694f078: 2024-12-11T04:27:31,184 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078., storeName=d07fdbcaf0fe943be9e071492694f078/A, priority=13, startTime=1733891251062; duration=0sec 2024-12-11T04:27:31,184 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:27:31,184 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d07fdbcaf0fe943be9e071492694f078:A 2024-12-11T04:27:31,185 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:27:31,187 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:27:31,187 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): d07fdbcaf0fe943be9e071492694f078/C is initiating minor compaction (all files) 2024-12-11T04:27:31,187 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d07fdbcaf0fe943be9e071492694f078/C in TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 
2024-12-11T04:27:31,187 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/6556c5529e564dd18437b0fdcde7b7b4, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/ef73f29393544c2bb3e1e24fec352215, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/812cdc95a4eb4cfc8eb3e31841cab666] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp, totalSize=35.2 K 2024-12-11T04:27:31,188 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6556c5529e564dd18437b0fdcde7b7b4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1733891248826 2024-12-11T04:27:31,189 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting ef73f29393544c2bb3e1e24fec352215, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733891248840 2024-12-11T04:27:31,190 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 812cdc95a4eb4cfc8eb3e31841cab666, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733891250583 2024-12-11T04:27:31,195 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/eef115c015274204acce84bc26e5a3ed as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/eef115c015274204acce84bc26e5a3ed 2024-12-11T04:27:31,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/7d4f92a8c8bb474c910a25d0d09bf3bb is 50, key is test_row_0/C:col10/1733891250598/Put/seqid=0 2024-12-11T04:27:31,202 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d07fdbcaf0fe943be9e071492694f078#C#compaction#242 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:27:31,202 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/c12b282c791c4b9c891c7996720c29ab is 50, key is test_row_0/C:col10/1733891250583/Put/seqid=0 2024-12-11T04:27:31,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on d07fdbcaf0fe943be9e071492694f078 2024-12-11T04:27:31,213 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. as already flushing 2024-12-11T04:27:31,216 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d07fdbcaf0fe943be9e071492694f078/B of d07fdbcaf0fe943be9e071492694f078 into eef115c015274204acce84bc26e5a3ed(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:27:31,216 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d07fdbcaf0fe943be9e071492694f078: 2024-12-11T04:27:31,216 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078., storeName=d07fdbcaf0fe943be9e071492694f078/B, priority=13, startTime=1733891251062; duration=0sec 2024-12-11T04:27:31,216 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:31,216 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d07fdbcaf0fe943be9e071492694f078:B 2024-12-11T04:27:31,226 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:31,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891311222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:31,226 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:31,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891311222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:31,229 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:31,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891311225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:31,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-11T04:27:31,229 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:31,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891311226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:31,230 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:31,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891311227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:31,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742115_1291 (size=12001) 2024-12-11T04:27:31,233 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/7d4f92a8c8bb474c910a25d0d09bf3bb 2024-12-11T04:27:31,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/c7f66a38e0d547bda7e192b29e5523ed as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/c7f66a38e0d547bda7e192b29e5523ed 2024-12-11T04:27:31,243 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/c7f66a38e0d547bda7e192b29e5523ed, entries=150, sequenceid=74, filesize=11.7 K 2024-12-11T04:27:31,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/76f3a91de3ff4155bd728e34a8602195 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/76f3a91de3ff4155bd728e34a8602195 2024-12-11T04:27:31,250 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/76f3a91de3ff4155bd728e34a8602195, entries=150, sequenceid=74, filesize=11.7 K 2024-12-11T04:27:31,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, 
pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/7d4f92a8c8bb474c910a25d0d09bf3bb as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/7d4f92a8c8bb474c910a25d0d09bf3bb 2024-12-11T04:27:31,254 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/7d4f92a8c8bb474c910a25d0d09bf3bb, entries=150, sequenceid=74, filesize=11.7 K 2024-12-11T04:27:31,255 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=93.93 KB/96180 for d07fdbcaf0fe943be9e071492694f078 in 175ms, sequenceid=74, compaction requested=false 2024-12-11T04:27:31,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2538): Flush status journal for d07fdbcaf0fe943be9e071492694f078: 2024-12-11T04:27:31,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:31,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=78 2024-12-11T04:27:31,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=78 2024-12-11T04:27:31,258 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-12-11T04:27:31,258 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 329 msec 2024-12-11T04:27:31,259 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees in 333 msec 2024-12-11T04:27:31,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742116_1292 (size=12104) 2024-12-11T04:27:31,268 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/c12b282c791c4b9c891c7996720c29ab as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/c12b282c791c4b9c891c7996720c29ab 2024-12-11T04:27:31,276 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d07fdbcaf0fe943be9e071492694f078/C of d07fdbcaf0fe943be9e071492694f078 into c12b282c791c4b9c891c7996720c29ab(size=11.8 K), total size for store is 23.5 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:27:31,276 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d07fdbcaf0fe943be9e071492694f078: 2024-12-11T04:27:31,276 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078., storeName=d07fdbcaf0fe943be9e071492694f078/C, priority=13, startTime=1733891251062; duration=0sec 2024-12-11T04:27:31,276 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:31,276 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d07fdbcaf0fe943be9e071492694f078:C 2024-12-11T04:27:31,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on d07fdbcaf0fe943be9e071492694f078 2024-12-11T04:27:31,330 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d07fdbcaf0fe943be9e071492694f078 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-11T04:27:31,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=A 2024-12-11T04:27:31,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:31,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=B 2024-12-11T04:27:31,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:31,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=C 2024-12-11T04:27:31,330 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:31,337 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/a640cf02818847c19c96027244bedf33 is 50, key is test_row_0/A:col10/1733891251328/Put/seqid=0 2024-12-11T04:27:31,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742117_1293 (size=12001) 2024-12-11T04:27:31,342 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/a640cf02818847c19c96027244bedf33 2024-12-11T04:27:31,345 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:31,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891311343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:31,347 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:31,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891311344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:31,347 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:31,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891311345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:31,347 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:31,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891311345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:31,348 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:31,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891311346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:31,350 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/bcd2c286a6864a5b8e8240723137196a is 50, key is test_row_0/B:col10/1733891251328/Put/seqid=0 2024-12-11T04:27:31,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742118_1294 (size=12001) 2024-12-11T04:27:31,447 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:31,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891311446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:31,449 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:31,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891311448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:31,450 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:31,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891311448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:31,452 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:31,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891311450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:31,454 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:31,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891311452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:31,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-11T04:27:31,530 INFO [Thread-1267 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 77 completed 2024-12-11T04:27:31,531 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:27:31,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees 2024-12-11T04:27:31,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-11T04:27:31,533 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:27:31,533 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:27:31,533 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:27:31,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=79 2024-12-11T04:27:31,649 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:31,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891311648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:31,652 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:31,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891311651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:31,653 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:31,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891311651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:31,655 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:31,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891311654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:31,656 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:31,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891311655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:31,685 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:31,686 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-11T04:27:31,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:31,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. as already flushing 2024-12-11T04:27:31,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:31,686 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
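The repeated RegionTooBusyException entries above ("Over memstore limit=512.0 K") are the region server pushing back on writers while flushes catch up. A minimal client-side sketch of retrying such a put with backoff, assuming the TestAcidGuarantees table and an illustrative row/value; depending on client retry settings the exception may surface directly or wrapped in a retries-exhausted exception, so the sketch catches IOException broadly:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPutSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                // Row/family/qualifier mirror the keys seen in the log (test_row_0/A:col10).
                Put put = new Put(Bytes.toBytes("test_row_0"))
                        .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                long backoffMs = 100;
                for (int attempt = 1; attempt <= 5; attempt++) {
                    try {
                        table.put(put);
                        break; // write accepted
                    } catch (IOException e) {
                        // RegionTooBusyException ("Over memstore limit=...") can land here,
                        // possibly wrapped; back off and retry a few times before giving up.
                        if (attempt == 5) {
                            throw e;
                        }
                        Thread.sleep(backoffMs);
                        backoffMs *= 2; // simple exponential backoff
                    }
                }
            }
        }
    }
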
2024-12-11T04:27:31,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:31,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:31,762 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/bcd2c286a6864a5b8e8240723137196a 2024-12-11T04:27:31,771 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/efc5baf81a2f4efcbab255620642d00d is 50, key is test_row_0/C:col10/1733891251328/Put/seqid=0 2024-12-11T04:27:31,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742119_1295 (size=12001) 2024-12-11T04:27:31,776 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/efc5baf81a2f4efcbab255620642d00d 2024-12-11T04:27:31,781 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/a640cf02818847c19c96027244bedf33 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/a640cf02818847c19c96027244bedf33 2024-12-11T04:27:31,784 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/a640cf02818847c19c96027244bedf33, entries=150, sequenceid=96, filesize=11.7 K 2024-12-11T04:27:31,785 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/bcd2c286a6864a5b8e8240723137196a as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/bcd2c286a6864a5b8e8240723137196a 2024-12-11T04:27:31,788 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/bcd2c286a6864a5b8e8240723137196a, entries=150, sequenceid=96, filesize=11.7 K 2024-12-11T04:27:31,789 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/efc5baf81a2f4efcbab255620642d00d as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/efc5baf81a2f4efcbab255620642d00d 
2024-12-11T04:27:31,793 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/efc5baf81a2f4efcbab255620642d00d, entries=150, sequenceid=96, filesize=11.7 K 2024-12-11T04:27:31,794 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=100.63 KB/103050 for d07fdbcaf0fe943be9e071492694f078 in 464ms, sequenceid=96, compaction requested=true 2024-12-11T04:27:31,794 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d07fdbcaf0fe943be9e071492694f078: 2024-12-11T04:27:31,794 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d07fdbcaf0fe943be9e071492694f078:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:27:31,794 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:31,794 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:27:31,794 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d07fdbcaf0fe943be9e071492694f078:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:27:31,794 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:31,794 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d07fdbcaf0fe943be9e071492694f078:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:27:31,794 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:27:31,794 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:27:31,795 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:27:31,796 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:27:31,796 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): d07fdbcaf0fe943be9e071492694f078/B is initiating minor compaction (all files) 2024-12-11T04:27:31,796 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): d07fdbcaf0fe943be9e071492694f078/A is initiating minor compaction (all files) 2024-12-11T04:27:31,796 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d07fdbcaf0fe943be9e071492694f078/B in TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 
2024-12-11T04:27:31,796 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d07fdbcaf0fe943be9e071492694f078/A in TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:31,796 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/eef115c015274204acce84bc26e5a3ed, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/76f3a91de3ff4155bd728e34a8602195, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/bcd2c286a6864a5b8e8240723137196a] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp, totalSize=35.3 K 2024-12-11T04:27:31,796 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/de59b864c537416bb52af9983fdbc6bf, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/c7f66a38e0d547bda7e192b29e5523ed, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/a640cf02818847c19c96027244bedf33] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp, totalSize=35.3 K 2024-12-11T04:27:31,800 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting eef115c015274204acce84bc26e5a3ed, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733891250583 2024-12-11T04:27:31,800 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting de59b864c537416bb52af9983fdbc6bf, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733891250583 2024-12-11T04:27:31,800 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 76f3a91de3ff4155bd728e34a8602195, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1733891250590 2024-12-11T04:27:31,800 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting c7f66a38e0d547bda7e192b29e5523ed, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1733891250590 2024-12-11T04:27:31,800 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting bcd2c286a6864a5b8e8240723137196a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1733891251224 2024-12-11T04:27:31,800 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting a640cf02818847c19c96027244bedf33, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1733891251224 
2024-12-11T04:27:31,802 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-11T04:27:31,822 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d07fdbcaf0fe943be9e071492694f078#A#compaction#246 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:27:31,822 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/b1506d26d2c54e19bc9a36326fa783d3 is 50, key is test_row_0/A:col10/1733891251328/Put/seqid=0 2024-12-11T04:27:31,831 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d07fdbcaf0fe943be9e071492694f078#B#compaction#247 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:27:31,831 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/d959bf47e83b4102ae3a844e9ca38ae9 is 50, key is test_row_0/B:col10/1733891251328/Put/seqid=0 2024-12-11T04:27:31,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-11T04:27:31,838 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:31,839 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-11T04:27:31,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 
2024-12-11T04:27:31,839 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2837): Flushing d07fdbcaf0fe943be9e071492694f078 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-11T04:27:31,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=A 2024-12-11T04:27:31,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:31,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=B 2024-12-11T04:27:31,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:31,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=C 2024-12-11T04:27:31,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:31,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742120_1296 (size=12207) 2024-12-11T04:27:31,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/34c776df92cd4685b7dfcfdff73f9dc3 is 50, key is test_row_0/A:col10/1733891251344/Put/seqid=0 2024-12-11T04:27:31,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742122_1298 (size=12001) 2024-12-11T04:27:31,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742121_1297 (size=12207) 2024-12-11T04:27:31,866 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/d959bf47e83b4102ae3a844e9ca38ae9 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/d959bf47e83b4102ae3a844e9ca38ae9 2024-12-11T04:27:31,871 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d07fdbcaf0fe943be9e071492694f078/B of d07fdbcaf0fe943be9e071492694f078 into d959bf47e83b4102ae3a844e9ca38ae9(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T04:27:31,871 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d07fdbcaf0fe943be9e071492694f078: 2024-12-11T04:27:31,871 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078., storeName=d07fdbcaf0fe943be9e071492694f078/B, priority=13, startTime=1733891251794; duration=0sec 2024-12-11T04:27:31,871 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:27:31,871 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d07fdbcaf0fe943be9e071492694f078:B 2024-12-11T04:27:31,871 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:27:31,873 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:27:31,873 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): d07fdbcaf0fe943be9e071492694f078/C is initiating minor compaction (all files) 2024-12-11T04:27:31,873 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d07fdbcaf0fe943be9e071492694f078/C in TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:31,873 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/c12b282c791c4b9c891c7996720c29ab, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/7d4f92a8c8bb474c910a25d0d09bf3bb, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/efc5baf81a2f4efcbab255620642d00d] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp, totalSize=35.3 K 2024-12-11T04:27:31,873 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting c12b282c791c4b9c891c7996720c29ab, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733891250583 2024-12-11T04:27:31,874 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 7d4f92a8c8bb474c910a25d0d09bf3bb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1733891250590 2024-12-11T04:27:31,874 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting efc5baf81a2f4efcbab255620642d00d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1733891251224 2024-12-11T04:27:31,882 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
d07fdbcaf0fe943be9e071492694f078#C#compaction#249 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:27:31,882 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/0dbfb537a12a410b9c0c8d9d65be2943 is 50, key is test_row_0/C:col10/1733891251328/Put/seqid=0 2024-12-11T04:27:31,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742123_1299 (size=12207) 2024-12-11T04:27:31,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on d07fdbcaf0fe943be9e071492694f078 2024-12-11T04:27:31,952 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. as already flushing 2024-12-11T04:27:31,968 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:31,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891311965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:31,970 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:31,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891311966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:31,970 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:31,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891311966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:31,970 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:31,970 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:31,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891311967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:31,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891311967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:32,071 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:32,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891312070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:32,072 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:32,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891312071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:32,073 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:32,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891312072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:32,075 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:32,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891312072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:32,075 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:32,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891312072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:32,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-11T04:27:32,256 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/b1506d26d2c54e19bc9a36326fa783d3 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/b1506d26d2c54e19bc9a36326fa783d3 2024-12-11T04:27:32,257 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/34c776df92cd4685b7dfcfdff73f9dc3 2024-12-11T04:27:32,262 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d07fdbcaf0fe943be9e071492694f078/A of d07fdbcaf0fe943be9e071492694f078 into b1506d26d2c54e19bc9a36326fa783d3(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T04:27:32,262 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d07fdbcaf0fe943be9e071492694f078: 2024-12-11T04:27:32,262 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078., storeName=d07fdbcaf0fe943be9e071492694f078/A, priority=13, startTime=1733891251794; duration=0sec 2024-12-11T04:27:32,262 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:32,262 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d07fdbcaf0fe943be9e071492694f078:A 2024-12-11T04:27:32,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/1addb432485f4d389e150941f985843c is 50, key is test_row_0/B:col10/1733891251344/Put/seqid=0 2024-12-11T04:27:32,274 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:32,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891312273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:32,275 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:32,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891312273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:32,276 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:32,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891312275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:32,278 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:32,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891312277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:32,279 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:32,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891312277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:32,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742124_1300 (size=12001) 2024-12-11T04:27:32,294 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/0dbfb537a12a410b9c0c8d9d65be2943 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/0dbfb537a12a410b9c0c8d9d65be2943 2024-12-11T04:27:32,300 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d07fdbcaf0fe943be9e071492694f078/C of d07fdbcaf0fe943be9e071492694f078 into 0dbfb537a12a410b9c0c8d9d65be2943(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:27:32,300 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d07fdbcaf0fe943be9e071492694f078: 2024-12-11T04:27:32,300 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078., storeName=d07fdbcaf0fe943be9e071492694f078/C, priority=13, startTime=1733891251794; duration=0sec 2024-12-11T04:27:32,300 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:32,301 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d07fdbcaf0fe943be9e071492694f078:C 2024-12-11T04:27:32,577 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:32,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891312576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:32,579 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:32,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891312577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:32,580 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:32,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891312579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:32,582 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:32,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891312581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:32,583 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:32,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891312581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:32,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-11T04:27:32,691 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/1addb432485f4d389e150941f985843c 2024-12-11T04:27:32,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/6427c44225314cb28936c7843a43777e is 50, key is test_row_0/C:col10/1733891251344/Put/seqid=0 2024-12-11T04:27:32,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742125_1301 (size=12001) 2024-12-11T04:27:32,731 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/6427c44225314cb28936c7843a43777e 2024-12-11T04:27:32,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/34c776df92cd4685b7dfcfdff73f9dc3 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/34c776df92cd4685b7dfcfdff73f9dc3 2024-12-11T04:27:32,740 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/34c776df92cd4685b7dfcfdff73f9dc3, entries=150, sequenceid=114, filesize=11.7 K 2024-12-11T04:27:32,742 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/1addb432485f4d389e150941f985843c as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/1addb432485f4d389e150941f985843c 2024-12-11T04:27:32,746 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/1addb432485f4d389e150941f985843c, entries=150, sequenceid=114, filesize=11.7 K 2024-12-11T04:27:32,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/6427c44225314cb28936c7843a43777e as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/6427c44225314cb28936c7843a43777e 2024-12-11T04:27:32,751 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/6427c44225314cb28936c7843a43777e, entries=150, sequenceid=114, filesize=11.7 K 2024-12-11T04:27:32,752 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=107.34 KB/109920 for d07fdbcaf0fe943be9e071492694f078 in 913ms, sequenceid=114, compaction requested=false 2024-12-11T04:27:32,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2538): Flush status journal for d07fdbcaf0fe943be9e071492694f078: 2024-12-11T04:27:32,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 
2024-12-11T04:27:32,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-12-11T04:27:32,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=80 2024-12-11T04:27:32,757 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79 2024-12-11T04:27:32,757 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2210 sec 2024-12-11T04:27:32,759 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees in 1.2260 sec 2024-12-11T04:27:33,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on d07fdbcaf0fe943be9e071492694f078 2024-12-11T04:27:33,082 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d07fdbcaf0fe943be9e071492694f078 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-11T04:27:33,083 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=A 2024-12-11T04:27:33,083 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:33,083 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=B 2024-12-11T04:27:33,083 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:33,084 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=C 2024-12-11T04:27:33,084 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:33,088 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/e5157749a5114b7f910279a352e85399 is 50, key is test_row_0/A:col10/1733891251965/Put/seqid=0 2024-12-11T04:27:33,096 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:33,096 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:33,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891313093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:33,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891313093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:33,096 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:33,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891313094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:33,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742126_1302 (size=12151) 2024-12-11T04:27:33,097 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=138 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/e5157749a5114b7f910279a352e85399 2024-12-11T04:27:33,098 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:33,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891313096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:33,099 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:33,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891313096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:33,105 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/ddb3ae1d69b84682a8946ac0a88d3dd5 is 50, key is test_row_0/B:col10/1733891251965/Put/seqid=0 2024-12-11T04:27:33,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742127_1303 (size=12151) 2024-12-11T04:27:33,199 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:33,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891313197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:33,199 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:33,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891313197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:33,199 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:33,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891313198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:33,201 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:33,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891313200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:33,201 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:33,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891313200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:33,401 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:33,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891313400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:33,402 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:33,402 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:33,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891313400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:33,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891313400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:33,404 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:33,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891313402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:33,405 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:33,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891313402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:33,510 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=138 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/ddb3ae1d69b84682a8946ac0a88d3dd5 2024-12-11T04:27:33,517 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/178b4a9a4c40444f89ca16fc6ff8a9f4 is 50, key is test_row_0/C:col10/1733891251965/Put/seqid=0 2024-12-11T04:27:33,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742128_1304 (size=12151) 2024-12-11T04:27:33,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-11T04:27:33,637 INFO [Thread-1267 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 79 completed 2024-12-11T04:27:33,638 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:27:33,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees 2024-12-11T04:27:33,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 
2024-12-11T04:27:33,640 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:27:33,641 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:27:33,641 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:27:33,704 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:33,705 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:33,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891313703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:33,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891313703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:33,705 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:33,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891313704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:33,706 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:33,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891313705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:33,708 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:33,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891313705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:33,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-11T04:27:33,793 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:33,793 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-11T04:27:33,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:33,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. as already flushing 2024-12-11T04:27:33,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:33,793 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:27:33,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:33,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:33,923 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=138 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/178b4a9a4c40444f89ca16fc6ff8a9f4 2024-12-11T04:27:33,928 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/e5157749a5114b7f910279a352e85399 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/e5157749a5114b7f910279a352e85399 2024-12-11T04:27:33,933 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/e5157749a5114b7f910279a352e85399, entries=150, sequenceid=138, filesize=11.9 K 2024-12-11T04:27:33,933 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/ddb3ae1d69b84682a8946ac0a88d3dd5 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/ddb3ae1d69b84682a8946ac0a88d3dd5 2024-12-11T04:27:33,937 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/ddb3ae1d69b84682a8946ac0a88d3dd5, entries=150, sequenceid=138, filesize=11.9 K 2024-12-11T04:27:33,938 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/178b4a9a4c40444f89ca16fc6ff8a9f4 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/178b4a9a4c40444f89ca16fc6ff8a9f4 2024-12-11T04:27:33,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-11T04:27:33,942 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/178b4a9a4c40444f89ca16fc6ff8a9f4, entries=150, sequenceid=138, filesize=11.9 K 2024-12-11T04:27:33,942 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for d07fdbcaf0fe943be9e071492694f078 in 860ms, sequenceid=138, compaction requested=true 2024-12-11T04:27:33,942 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
d07fdbcaf0fe943be9e071492694f078: 2024-12-11T04:27:33,942 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d07fdbcaf0fe943be9e071492694f078:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:27:33,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:33,943 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:27:33,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d07fdbcaf0fe943be9e071492694f078:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:27:33,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:33,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d07fdbcaf0fe943be9e071492694f078:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:27:33,943 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:27:33,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:27:33,944 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:27:33,944 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:27:33,944 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): d07fdbcaf0fe943be9e071492694f078/A is initiating minor compaction (all files) 2024-12-11T04:27:33,944 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): d07fdbcaf0fe943be9e071492694f078/B is initiating minor compaction (all files) 2024-12-11T04:27:33,944 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d07fdbcaf0fe943be9e071492694f078/A in TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:33,944 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d07fdbcaf0fe943be9e071492694f078/B in TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 
2024-12-11T04:27:33,944 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/b1506d26d2c54e19bc9a36326fa783d3, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/34c776df92cd4685b7dfcfdff73f9dc3, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/e5157749a5114b7f910279a352e85399] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp, totalSize=35.5 K 2024-12-11T04:27:33,944 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/d959bf47e83b4102ae3a844e9ca38ae9, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/1addb432485f4d389e150941f985843c, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/ddb3ae1d69b84682a8946ac0a88d3dd5] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp, totalSize=35.5 K 2024-12-11T04:27:33,945 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting b1506d26d2c54e19bc9a36326fa783d3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1733891251224 2024-12-11T04:27:33,945 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting d959bf47e83b4102ae3a844e9ca38ae9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1733891251224 2024-12-11T04:27:33,945 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 1addb432485f4d389e150941f985843c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1733891251342 2024-12-11T04:27:33,945 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 34c776df92cd4685b7dfcfdff73f9dc3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1733891251342 2024-12-11T04:27:33,946 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting ddb3ae1d69b84682a8946ac0a88d3dd5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1733891251965 2024-12-11T04:27:33,946 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:33,946 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting e5157749a5114b7f910279a352e85399, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1733891251965 2024-12-11T04:27:33,946 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-11T04:27:33,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:33,946 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2837): Flushing d07fdbcaf0fe943be9e071492694f078 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-11T04:27:33,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=A 2024-12-11T04:27:33,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:33,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=B 2024-12-11T04:27:33,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:33,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=C 2024-12-11T04:27:33,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:33,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/08a3efa925e6488da87da812a534c3e0 is 50, key is test_row_0/A:col10/1733891253095/Put/seqid=0 2024-12-11T04:27:33,958 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d07fdbcaf0fe943be9e071492694f078#A#compaction#256 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:27:33,958 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/ee6ea9b077ff40f09a97178d09e3691c is 50, key is test_row_0/A:col10/1733891251965/Put/seqid=0 2024-12-11T04:27:33,961 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d07fdbcaf0fe943be9e071492694f078#B#compaction#257 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:27:33,961 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/372bf382856d41aea31231483409e8ad is 50, key is test_row_0/B:col10/1733891251965/Put/seqid=0 2024-12-11T04:27:33,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742130_1306 (size=12459) 2024-12-11T04:27:33,972 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/ee6ea9b077ff40f09a97178d09e3691c as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/ee6ea9b077ff40f09a97178d09e3691c 2024-12-11T04:27:33,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742131_1307 (size=12459) 2024-12-11T04:27:33,977 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d07fdbcaf0fe943be9e071492694f078/A of d07fdbcaf0fe943be9e071492694f078 into ee6ea9b077ff40f09a97178d09e3691c(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:27:33,977 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d07fdbcaf0fe943be9e071492694f078: 2024-12-11T04:27:33,977 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078., storeName=d07fdbcaf0fe943be9e071492694f078/A, priority=13, startTime=1733891253942; duration=0sec 2024-12-11T04:27:33,977 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:27:33,977 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d07fdbcaf0fe943be9e071492694f078:A 2024-12-11T04:27:33,978 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:27:33,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742129_1305 (size=12151) 2024-12-11T04:27:33,979 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:27:33,980 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): d07fdbcaf0fe943be9e071492694f078/C is initiating minor compaction (all files) 2024-12-11T04:27:33,980 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d07fdbcaf0fe943be9e071492694f078/C in 
TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:33,980 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/0dbfb537a12a410b9c0c8d9d65be2943, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/6427c44225314cb28936c7843a43777e, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/178b4a9a4c40444f89ca16fc6ff8a9f4] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp, totalSize=35.5 K 2024-12-11T04:27:33,980 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0dbfb537a12a410b9c0c8d9d65be2943, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1733891251224 2024-12-11T04:27:33,981 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6427c44225314cb28936c7843a43777e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1733891251342 2024-12-11T04:27:33,982 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 178b4a9a4c40444f89ca16fc6ff8a9f4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1733891251965 2024-12-11T04:27:33,984 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/372bf382856d41aea31231483409e8ad as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/372bf382856d41aea31231483409e8ad 2024-12-11T04:27:33,990 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d07fdbcaf0fe943be9e071492694f078/B of d07fdbcaf0fe943be9e071492694f078 into 372bf382856d41aea31231483409e8ad(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T04:27:33,990 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d07fdbcaf0fe943be9e071492694f078: 2024-12-11T04:27:33,990 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078., storeName=d07fdbcaf0fe943be9e071492694f078/B, priority=13, startTime=1733891253943; duration=0sec 2024-12-11T04:27:33,990 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:33,990 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d07fdbcaf0fe943be9e071492694f078:B 2024-12-11T04:27:33,993 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d07fdbcaf0fe943be9e071492694f078#C#compaction#258 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:27:33,993 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/6c858b37a6f14972947c8c24483048c4 is 50, key is test_row_0/C:col10/1733891251965/Put/seqid=0 2024-12-11T04:27:34,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742132_1308 (size=12459) 2024-12-11T04:27:34,022 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/6c858b37a6f14972947c8c24483048c4 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/6c858b37a6f14972947c8c24483048c4 2024-12-11T04:27:34,027 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d07fdbcaf0fe943be9e071492694f078/C of d07fdbcaf0fe943be9e071492694f078 into 6c858b37a6f14972947c8c24483048c4(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T04:27:34,027 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d07fdbcaf0fe943be9e071492694f078: 2024-12-11T04:27:34,027 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078., storeName=d07fdbcaf0fe943be9e071492694f078/C, priority=13, startTime=1733891253943; duration=0sec 2024-12-11T04:27:34,028 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:34,028 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d07fdbcaf0fe943be9e071492694f078:C 2024-12-11T04:27:34,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on d07fdbcaf0fe943be9e071492694f078 2024-12-11T04:27:34,209 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. as already flushing 2024-12-11T04:27:34,223 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:34,223 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:34,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891314220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:34,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891314221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:34,226 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:34,226 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:34,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891314223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:34,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891314223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:34,226 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:34,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891314223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:34,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-11T04:27:34,325 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:34,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891314324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:34,326 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:34,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891314324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:34,328 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:34,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891314327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:34,328 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:34,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891314327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:34,329 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:34,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891314327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:34,379 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=153 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/08a3efa925e6488da87da812a534c3e0 2024-12-11T04:27:34,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/71ac100b8f244b28a34a4b82f3ecdc35 is 50, key is test_row_0/B:col10/1733891253095/Put/seqid=0 2024-12-11T04:27:34,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742133_1309 (size=12151) 2024-12-11T04:27:34,391 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=153 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/71ac100b8f244b28a34a4b82f3ecdc35 2024-12-11T04:27:34,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/cec9634a396547ea8d190767f21df470 is 50, key is test_row_0/C:col10/1733891253095/Put/seqid=0 2024-12-11T04:27:34,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742134_1310 (size=12151) 2024-12-11T04:27:34,528 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:34,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891314527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:34,529 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:34,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891314528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:34,531 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:34,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891314529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:34,531 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:34,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891314530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:34,532 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:34,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891314531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:34,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-11T04:27:34,803 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=153 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/cec9634a396547ea8d190767f21df470 2024-12-11T04:27:34,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/08a3efa925e6488da87da812a534c3e0 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/08a3efa925e6488da87da812a534c3e0 2024-12-11T04:27:34,813 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/08a3efa925e6488da87da812a534c3e0, entries=150, sequenceid=153, filesize=11.9 K 2024-12-11T04:27:34,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/71ac100b8f244b28a34a4b82f3ecdc35 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/71ac100b8f244b28a34a4b82f3ecdc35 2024-12-11T04:27:34,817 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/71ac100b8f244b28a34a4b82f3ecdc35, entries=150, sequenceid=153, filesize=11.9 K 2024-12-11T04:27:34,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/cec9634a396547ea8d190767f21df470 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/cec9634a396547ea8d190767f21df470 2024-12-11T04:27:34,822 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/cec9634a396547ea8d190767f21df470, entries=150, sequenceid=153, filesize=11.9 K 2024-12-11T04:27:34,823 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=134.18 KB/137400 for d07fdbcaf0fe943be9e071492694f078 in 877ms, sequenceid=153, compaction requested=false 2024-12-11T04:27:34,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2538): Flush status journal for d07fdbcaf0fe943be9e071492694f078: 2024-12-11T04:27:34,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:34,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-12-11T04:27:34,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=82 2024-12-11T04:27:34,827 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-12-11T04:27:34,827 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1840 sec 2024-12-11T04:27:34,828 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees in 1.1890 sec 2024-12-11T04:27:34,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on d07fdbcaf0fe943be9e071492694f078 2024-12-11T04:27:34,832 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d07fdbcaf0fe943be9e071492694f078 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-11T04:27:34,833 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=A 2024-12-11T04:27:34,833 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:34,833 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=B 2024-12-11T04:27:34,833 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:34,833 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=C 2024-12-11T04:27:34,833 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:34,837 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/b3d391122bf64c25acc4e1fe70bee2f5 is 50, key is test_row_0/A:col10/1733891254222/Put/seqid=0 2024-12-11T04:27:34,843 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:34,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891314840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:34,843 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:34,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891314841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:34,844 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:34,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891314842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:34,844 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:34,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891314842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:34,845 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:34,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891314843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:34,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742135_1311 (size=14541) 2024-12-11T04:27:34,852 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=180 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/b3d391122bf64c25acc4e1fe70bee2f5 2024-12-11T04:27:34,859 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/87faf121ea824e95a4b5b15f9e07b64a is 50, key is test_row_0/B:col10/1733891254222/Put/seqid=0 2024-12-11T04:27:34,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742136_1312 (size=12151) 2024-12-11T04:27:34,865 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=180 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/87faf121ea824e95a4b5b15f9e07b64a 2024-12-11T04:27:34,874 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/83e719f28f8941a88166888a4506b781 is 50, key is test_row_0/C:col10/1733891254222/Put/seqid=0 2024-12-11T04:27:34,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742137_1313 (size=12151) 2024-12-11T04:27:34,886 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=180 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/83e719f28f8941a88166888a4506b781 2024-12-11T04:27:34,892 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/b3d391122bf64c25acc4e1fe70bee2f5 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/b3d391122bf64c25acc4e1fe70bee2f5 2024-12-11T04:27:34,896 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/b3d391122bf64c25acc4e1fe70bee2f5, entries=200, sequenceid=180, filesize=14.2 K 2024-12-11T04:27:34,897 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/87faf121ea824e95a4b5b15f9e07b64a as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/87faf121ea824e95a4b5b15f9e07b64a 2024-12-11T04:27:34,901 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/87faf121ea824e95a4b5b15f9e07b64a, entries=150, sequenceid=180, filesize=11.9 K 2024-12-11T04:27:34,902 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/83e719f28f8941a88166888a4506b781 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/83e719f28f8941a88166888a4506b781 2024-12-11T04:27:34,907 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/83e719f28f8941a88166888a4506b781, entries=150, sequenceid=180, filesize=11.9 K 2024-12-11T04:27:34,908 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for d07fdbcaf0fe943be9e071492694f078 in 76ms, sequenceid=180, compaction requested=true 2024-12-11T04:27:34,908 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d07fdbcaf0fe943be9e071492694f078: 2024-12-11T04:27:34,908 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d07fdbcaf0fe943be9e071492694f078:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:27:34,908 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:34,908 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d07fdbcaf0fe943be9e071492694f078:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:27:34,908 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:27:34,908 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:27:34,908 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d07fdbcaf0fe943be9e071492694f078:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:27:34,908 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-11T04:27:34,909 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39151 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:27:34,909 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): d07fdbcaf0fe943be9e071492694f078/A is initiating minor compaction (all files) 2024-12-11T04:27:34,909 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d07fdbcaf0fe943be9e071492694f078/A in TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:34,910 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/ee6ea9b077ff40f09a97178d09e3691c, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/08a3efa925e6488da87da812a534c3e0, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/b3d391122bf64c25acc4e1fe70bee2f5] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp, totalSize=38.2 K 2024-12-11T04:27:34,910 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:27:34,911 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting ee6ea9b077ff40f09a97178d09e3691c, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1733891251965 2024-12-11T04:27:34,911 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:27:34,911 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): d07fdbcaf0fe943be9e071492694f078/B is initiating minor compaction (all files) 2024-12-11T04:27:34,911 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d07fdbcaf0fe943be9e071492694f078/B in TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 
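The ExploringCompactionPolicy entries above record a minor-compaction selection: three store files totalling 39151 bytes were chosen after considering one permutation, all "in ratio". As a rough illustration only (not the actual HBase ExploringCompactionPolicy code), the core idea is that every candidate file must be no larger than a configured ratio times the combined size of the other candidates. The individual file sizes and the 1.2 ratio below are assumptions for the example; only the 39151-byte total is taken from the log.

import java.util.List;

// Simplified sketch of a ratio-based compaction eligibility check.
// This is NOT the real ExploringCompactionPolicy; it only illustrates the
// "within ratio" condition reported in the log ("... with 1 in ratio").
public final class RatioCheckSketch {

    // True when every file is <= ratio * (sum of the other files), the usual
    // condition for a group of store files to be compacted together.
    static boolean withinRatio(List<Long> fileSizes, double ratio) {
        long total = 0L;
        for (long size : fileSizes) {
            total += size;
        }
        for (long size : fileSizes) {
            if (size > ratio * (total - size)) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Assumed split of the 39151-byte selection logged for store A
        // (the per-file breakdown is illustrative, not recovered from the log).
        List<Long> sizes = List.of(12_461L, 12_149L, 14_541L);
        System.out.println("compactable=" + withinRatio(sizes, 1.2));
    }
}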
2024-12-11T04:27:34,911 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 08a3efa925e6488da87da812a534c3e0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1733891253091 2024-12-11T04:27:34,911 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/372bf382856d41aea31231483409e8ad, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/71ac100b8f244b28a34a4b82f3ecdc35, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/87faf121ea824e95a4b5b15f9e07b64a] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp, totalSize=35.9 K 2024-12-11T04:27:34,912 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting b3d391122bf64c25acc4e1fe70bee2f5, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1733891254219 2024-12-11T04:27:34,912 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 372bf382856d41aea31231483409e8ad, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1733891251965 2024-12-11T04:27:34,912 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 71ac100b8f244b28a34a4b82f3ecdc35, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1733891253091 2024-12-11T04:27:34,913 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 87faf121ea824e95a4b5b15f9e07b64a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1733891254222 2024-12-11T04:27:34,922 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d07fdbcaf0fe943be9e071492694f078#B#compaction#264 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:27:34,922 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/084b5256b72443a2a2d9c1f2d9d84c1c is 50, key is test_row_0/B:col10/1733891254222/Put/seqid=0 2024-12-11T04:27:34,934 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d07fdbcaf0fe943be9e071492694f078#A#compaction#265 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:27:34,934 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/0e3487774c7c40e3a7741897e81aee0b is 50, key is test_row_0/A:col10/1733891254222/Put/seqid=0 2024-12-11T04:27:34,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742138_1314 (size=12561) 2024-12-11T04:27:34,948 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/084b5256b72443a2a2d9c1f2d9d84c1c as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/084b5256b72443a2a2d9c1f2d9d84c1c 2024-12-11T04:27:34,948 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d07fdbcaf0fe943be9e071492694f078 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-11T04:27:34,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on d07fdbcaf0fe943be9e071492694f078 2024-12-11T04:27:34,948 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=A 2024-12-11T04:27:34,948 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:34,948 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=B 2024-12-11T04:27:34,948 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:34,948 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=C 2024-12-11T04:27:34,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:34,953 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d07fdbcaf0fe943be9e071492694f078/B of d07fdbcaf0fe943be9e071492694f078 into 084b5256b72443a2a2d9c1f2d9d84c1c(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
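The repeated "Over memstore limit=512.0 K" warnings around these flushes come from the region blocking new Puts until its memstore is flushed. In a stock HBase deployment that blocking limit is derived from the per-region flush size multiplied by a block multiplier; the 512 K figure here is presumably a deliberately small test setting. A minimal sketch of the relevant configuration keys follows, with illustrative (assumed) values chosen only so that flush size times multiplier reproduces a 512 K limit; they are not read from this test's actual configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch: the settings that, in a standard setup, determine when a region
// starts rejecting writes with RegionTooBusyException. Values are assumed.
public final class MemStoreLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L); // per-region flush threshold (assumed 128 KB)
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // blocking limit = flush size * multiplier
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("writes block above ~" + blockingLimit + " bytes per region");
    }
}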
2024-12-11T04:27:34,953 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d07fdbcaf0fe943be9e071492694f078: 2024-12-11T04:27:34,953 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078., storeName=d07fdbcaf0fe943be9e071492694f078/B, priority=13, startTime=1733891254908; duration=0sec 2024-12-11T04:27:34,953 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:27:34,953 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d07fdbcaf0fe943be9e071492694f078:B 2024-12-11T04:27:34,954 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:27:34,955 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:27:34,955 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): d07fdbcaf0fe943be9e071492694f078/C is initiating minor compaction (all files) 2024-12-11T04:27:34,955 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d07fdbcaf0fe943be9e071492694f078/C in TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:34,955 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/6c858b37a6f14972947c8c24483048c4, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/cec9634a396547ea8d190767f21df470, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/83e719f28f8941a88166888a4506b781] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp, totalSize=35.9 K 2024-12-11T04:27:34,956 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6c858b37a6f14972947c8c24483048c4, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1733891251965 2024-12-11T04:27:34,957 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting cec9634a396547ea8d190767f21df470, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1733891253091 2024-12-11T04:27:34,957 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/4f6320768b7842cd9b3c17cad28ef17e is 50, key is test_row_0/A:col10/1733891254836/Put/seqid=0 2024-12-11T04:27:34,958 DEBUG 
[RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 83e719f28f8941a88166888a4506b781, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1733891254222 2024-12-11T04:27:34,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742139_1315 (size=12561) 2024-12-11T04:27:34,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742140_1316 (size=12151) 2024-12-11T04:27:34,965 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/4f6320768b7842cd9b3c17cad28ef17e 2024-12-11T04:27:34,970 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d07fdbcaf0fe943be9e071492694f078#C#compaction#267 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:27:34,971 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/a02f87e2a5cc41168b714ab210e708ff is 50, key is test_row_0/C:col10/1733891254222/Put/seqid=0 2024-12-11T04:27:34,971 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/0e3487774c7c40e3a7741897e81aee0b as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/0e3487774c7c40e3a7741897e81aee0b 2024-12-11T04:27:34,975 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:34,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891314971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:34,975 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:34,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891314971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:34,976 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:34,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891314972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:34,976 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:34,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891314973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:34,978 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d07fdbcaf0fe943be9e071492694f078/A of d07fdbcaf0fe943be9e071492694f078 into 0e3487774c7c40e3a7741897e81aee0b(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:27:34,978 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d07fdbcaf0fe943be9e071492694f078: 2024-12-11T04:27:34,978 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078., storeName=d07fdbcaf0fe943be9e071492694f078/A, priority=13, startTime=1733891254908; duration=0sec 2024-12-11T04:27:34,978 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:34,978 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d07fdbcaf0fe943be9e071492694f078:A 2024-12-11T04:27:34,978 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:34,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891314976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:34,978 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/dce78cda5f7b423986d53f4679d02ca1 is 50, key is test_row_0/B:col10/1733891254836/Put/seqid=0 2024-12-11T04:27:34,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742141_1317 (size=12561) 2024-12-11T04:27:34,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742142_1318 (size=12151) 2024-12-11T04:27:35,078 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:35,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891315076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:35,079 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:35,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891315077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:35,079 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:35,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891315077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:35,079 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:35,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891315077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:35,081 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:35,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891315079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:35,280 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:35,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891315280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:35,281 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:35,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891315280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:35,282 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:35,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891315281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:35,283 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:35,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891315281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:35,285 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:35,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891315284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:35,386 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/a02f87e2a5cc41168b714ab210e708ff as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/a02f87e2a5cc41168b714ab210e708ff 2024-12-11T04:27:35,390 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/dce78cda5f7b423986d53f4679d02ca1 2024-12-11T04:27:35,391 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d07fdbcaf0fe943be9e071492694f078/C of d07fdbcaf0fe943be9e071492694f078 into a02f87e2a5cc41168b714ab210e708ff(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
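Note on the repeated RegionTooBusyException entries above: HRegion.checkResources rejects Mutate calls while this region's memstore sits above its blocking limit (reported here as 512.0 K), and the rejections stop once the in-flight flush drains the memstore. That blocking limit is derived from the configured flush size and the block multiplier. The sketch below is only an illustration of that relationship, using the standard configuration keys with their usual defaults; the exact values this test overrides are not visible in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Defaults shown here (128 MB flush size, multiplier of 4) are the usual
        // out-of-the-box values; a test like the one above typically overrides
        // hbase.hregion.memstore.flush.size with something much smaller.
        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        long blockMultiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);

        // Once a region's memstore grows past roughly flushSize * blockMultiplier,
        // writes are rejected with RegionTooBusyException until a flush brings it back down.
        long blockingLimit = flushSize * blockMultiplier;
        System.out.println("memstore blocking limit (bytes): " + blockingLimit);
    }
}

For example, a 128 K flush size combined with the default multiplier of 4 would give exactly the 512 K limit reported above, which is consistent with the test using a deliberately tiny memstore to force frequent flushes, though the actual settings are not shown here.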
2024-12-11T04:27:35,391 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d07fdbcaf0fe943be9e071492694f078: 2024-12-11T04:27:35,391 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078., storeName=d07fdbcaf0fe943be9e071492694f078/C, priority=13, startTime=1733891254908; duration=0sec 2024-12-11T04:27:35,391 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:35,391 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d07fdbcaf0fe943be9e071492694f078:C 2024-12-11T04:27:35,413 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/ea201e10d857403f898d8937d34d9f1c is 50, key is test_row_0/C:col10/1733891254836/Put/seqid=0 2024-12-11T04:27:35,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742143_1319 (size=12151) 2024-12-11T04:27:35,586 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:35,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891315584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:35,586 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:35,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891315584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:35,586 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:35,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891315584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:35,589 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:35,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891315587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:35,589 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:35,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891315587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:35,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-11T04:27:35,744 INFO [Thread-1267 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 81 completed 2024-12-11T04:27:35,745 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:27:35,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees 2024-12-11T04:27:35,747 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:27:35,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-11T04:27:35,747 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:27:35,747 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:27:35,839 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/ea201e10d857403f898d8937d34d9f1c 2024-12-11T04:27:35,845 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/4f6320768b7842cd9b3c17cad28ef17e as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/4f6320768b7842cd9b3c17cad28ef17e 2024-12-11T04:27:35,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-11T04:27:35,850 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/4f6320768b7842cd9b3c17cad28ef17e, entries=150, sequenceid=194, filesize=11.9 K 2024-12-11T04:27:35,851 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/dce78cda5f7b423986d53f4679d02ca1 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/dce78cda5f7b423986d53f4679d02ca1 2024-12-11T04:27:35,864 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/dce78cda5f7b423986d53f4679d02ca1, entries=150, sequenceid=194, filesize=11.9 K 2024-12-11T04:27:35,865 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/ea201e10d857403f898d8937d34d9f1c as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/ea201e10d857403f898d8937d34d9f1c 2024-12-11T04:27:35,869 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/ea201e10d857403f898d8937d34d9f1c, entries=150, sequenceid=194, filesize=11.9 K 2024-12-11T04:27:35,870 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for d07fdbcaf0fe943be9e071492694f078 in 922ms, sequenceid=194, compaction requested=false 2024-12-11T04:27:35,870 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d07fdbcaf0fe943be9e071492694f078: 2024-12-11T04:27:35,899 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:35,899 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-11T04:27:35,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 
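The FLUSH operations interleaved with the write rejections above (procId 81, then 83/84, and shortly 85/86 below) are driven by the master's FlushTableProcedure, which fans out one FlushRegionProcedure per region and reports completion back to the waiting admin client. As a rough client-side counterpart, and assuming the same table name, a flush like this is normally requested through the Admin API; the following is a minimal sketch, not code taken from this test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Asks the master to flush every region of the table; in the log above this
            // shows up as a FlushTableProcedure with FlushRegionProcedure sub-procedures.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}

A flush request that reaches a region which is already flushing is skipped on the region server ("NOT flushing ... as already flushing"), which is what later turns pid=86 into the "Unable to complete flush" IOException further down; the log then shows the same sub-procedure being re-dispatched while the first flush is still in progress.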
2024-12-11T04:27:35,900 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2837): Flushing d07fdbcaf0fe943be9e071492694f078 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-11T04:27:35,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=A 2024-12-11T04:27:35,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:35,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=B 2024-12-11T04:27:35,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:35,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=C 2024-12-11T04:27:35,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:35,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/9c60abba401a416ea48a3b05ad7a8fa6 is 50, key is test_row_0/A:col10/1733891254969/Put/seqid=0 2024-12-11T04:27:35,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742144_1320 (size=12151) 2024-12-11T04:27:35,922 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/9c60abba401a416ea48a3b05ad7a8fa6 2024-12-11T04:27:35,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/afe96d23b2bc42dc9704570b8bacd872 is 50, key is test_row_0/B:col10/1733891254969/Put/seqid=0 2024-12-11T04:27:35,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742145_1321 (size=12151) 2024-12-11T04:27:35,934 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=219 (bloomFilter=true), 
to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/afe96d23b2bc42dc9704570b8bacd872 2024-12-11T04:27:35,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/47a05691df2e4d3dbc8287c7ba987142 is 50, key is test_row_0/C:col10/1733891254969/Put/seqid=0 2024-12-11T04:27:35,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742146_1322 (size=12151) 2024-12-11T04:27:35,949 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/47a05691df2e4d3dbc8287c7ba987142 2024-12-11T04:27:35,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/9c60abba401a416ea48a3b05ad7a8fa6 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/9c60abba401a416ea48a3b05ad7a8fa6 2024-12-11T04:27:35,960 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/9c60abba401a416ea48a3b05ad7a8fa6, entries=150, sequenceid=219, filesize=11.9 K 2024-12-11T04:27:35,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/afe96d23b2bc42dc9704570b8bacd872 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/afe96d23b2bc42dc9704570b8bacd872 2024-12-11T04:27:35,964 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/afe96d23b2bc42dc9704570b8bacd872, entries=150, sequenceid=219, filesize=11.9 K 2024-12-11T04:27:35,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/47a05691df2e4d3dbc8287c7ba987142 as 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/47a05691df2e4d3dbc8287c7ba987142 2024-12-11T04:27:35,969 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/47a05691df2e4d3dbc8287c7ba987142, entries=150, sequenceid=219, filesize=11.9 K 2024-12-11T04:27:35,969 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=0 B/0 for d07fdbcaf0fe943be9e071492694f078 in 69ms, sequenceid=219, compaction requested=true 2024-12-11T04:27:35,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2538): Flush status journal for d07fdbcaf0fe943be9e071492694f078: 2024-12-11T04:27:35,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:35,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=84 2024-12-11T04:27:35,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=84 2024-12-11T04:27:35,972 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-12-11T04:27:35,972 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 223 msec 2024-12-11T04:27:35,974 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees in 228 msec 2024-12-11T04:27:36,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-11T04:27:36,049 INFO [Thread-1267 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 83 completed 2024-12-11T04:27:36,050 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:27:36,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees 2024-12-11T04:27:36,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-11T04:27:36,051 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:27:36,052 INFO [PEWorker-2 {}] 
procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:27:36,052 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:27:36,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on d07fdbcaf0fe943be9e071492694f078 2024-12-11T04:27:36,096 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d07fdbcaf0fe943be9e071492694f078 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-11T04:27:36,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=A 2024-12-11T04:27:36,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:36,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=B 2024-12-11T04:27:36,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:36,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=C 2024-12-11T04:27:36,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:36,103 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/697ec88bdffe4612b996b6c8510c4e3c is 50, key is test_row_0/A:col10/1733891256096/Put/seqid=0 2024-12-11T04:27:36,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742147_1323 (size=12151) 2024-12-11T04:27:36,118 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:36,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891316114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:36,119 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:36,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891316115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:36,120 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:36,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891316115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:36,122 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:36,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891316118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:36,123 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:36,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891316119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:36,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-11T04:27:36,204 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:36,204 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-11T04:27:36,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:36,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. as already flushing 2024-12-11T04:27:36,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:36,205 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:36,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:36,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:36,221 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:36,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891316219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:36,222 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:36,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891316221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:36,222 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:36,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891316221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:36,224 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:36,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891316223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:36,225 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:36,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891316224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:36,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-11T04:27:36,357 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:36,357 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-11T04:27:36,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:36,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. as already flushing 2024-12-11T04:27:36,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:36,358 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:36,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:36,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:36,423 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:36,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891316422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:36,426 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:36,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891316424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:36,426 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:36,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891316424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:36,428 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:36,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891316426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:36,429 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:36,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891316426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:36,509 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/697ec88bdffe4612b996b6c8510c4e3c 2024-12-11T04:27:36,510 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:36,510 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-11T04:27:36,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:36,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. as already flushing 2024-12-11T04:27:36,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 
2024-12-11T04:27:36,511 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:36,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:36,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:36,519 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/e9535959d15b45adab1218d657b1f382 is 50, key is test_row_0/B:col10/1733891256096/Put/seqid=0 2024-12-11T04:27:36,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742148_1324 (size=12151) 2024-12-11T04:27:36,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-11T04:27:36,662 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:36,662 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-11T04:27:36,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:36,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. as already flushing 2024-12-11T04:27:36,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 
2024-12-11T04:27:36,663 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:36,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:36,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:36,728 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:36,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891316726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:36,729 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:36,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891316728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:36,729 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:36,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891316729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:36,731 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:36,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891316731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:36,734 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:36,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891316732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:36,815 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:36,815 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-11T04:27:36,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:36,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. as already flushing 2024-12-11T04:27:36,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:36,816 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:36,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:36,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:36,925 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/e9535959d15b45adab1218d657b1f382 2024-12-11T04:27:36,934 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/a9c8821645e24ea38f18bb40e67a2f0b is 50, key is test_row_0/C:col10/1733891256096/Put/seqid=0 2024-12-11T04:27:36,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742149_1325 (size=12151) 2024-12-11T04:27:36,939 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/a9c8821645e24ea38f18bb40e67a2f0b 2024-12-11T04:27:36,956 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/697ec88bdffe4612b996b6c8510c4e3c as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/697ec88bdffe4612b996b6c8510c4e3c 2024-12-11T04:27:36,960 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/697ec88bdffe4612b996b6c8510c4e3c, entries=150, sequenceid=232, filesize=11.9 K 2024-12-11T04:27:36,961 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/e9535959d15b45adab1218d657b1f382 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/e9535959d15b45adab1218d657b1f382 2024-12-11T04:27:36,965 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/e9535959d15b45adab1218d657b1f382, entries=150, sequenceid=232, filesize=11.9 K 2024-12-11T04:27:36,966 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/a9c8821645e24ea38f18bb40e67a2f0b as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/a9c8821645e24ea38f18bb40e67a2f0b 2024-12-11T04:27:36,968 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:36,969 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-11T04:27:36,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:36,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. as already flushing 2024-12-11T04:27:36,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:36,969 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:36,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:36,970 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/a9c8821645e24ea38f18bb40e67a2f0b, entries=150, sequenceid=232, filesize=11.9 K 2024-12-11T04:27:36,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:36,971 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for d07fdbcaf0fe943be9e071492694f078 in 875ms, sequenceid=232, compaction requested=true 2024-12-11T04:27:36,971 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d07fdbcaf0fe943be9e071492694f078: 2024-12-11T04:27:36,971 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d07fdbcaf0fe943be9e071492694f078:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:27:36,971 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:36,971 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d07fdbcaf0fe943be9e071492694f078:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:27:36,971 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T04:27:36,971 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:36,971 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d07fdbcaf0fe943be9e071492694f078:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:27:36,971 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:27:36,971 DEBUG 
[RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T04:27:36,973 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T04:27:36,973 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T04:27:36,973 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): d07fdbcaf0fe943be9e071492694f078/A is initiating minor compaction (all files) 2024-12-11T04:27:36,973 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): d07fdbcaf0fe943be9e071492694f078/B is initiating minor compaction (all files) 2024-12-11T04:27:36,973 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d07fdbcaf0fe943be9e071492694f078/A in TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:36,973 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d07fdbcaf0fe943be9e071492694f078/B in TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:36,973 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/0e3487774c7c40e3a7741897e81aee0b, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/4f6320768b7842cd9b3c17cad28ef17e, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/9c60abba401a416ea48a3b05ad7a8fa6, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/697ec88bdffe4612b996b6c8510c4e3c] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp, totalSize=47.9 K 2024-12-11T04:27:36,973 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/084b5256b72443a2a2d9c1f2d9d84c1c, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/dce78cda5f7b423986d53f4679d02ca1, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/afe96d23b2bc42dc9704570b8bacd872, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/e9535959d15b45adab1218d657b1f382] into 
tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp, totalSize=47.9 K 2024-12-11T04:27:36,974 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0e3487774c7c40e3a7741897e81aee0b, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1733891254222 2024-12-11T04:27:36,974 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 084b5256b72443a2a2d9c1f2d9d84c1c, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1733891254222 2024-12-11T04:27:36,974 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4f6320768b7842cd9b3c17cad28ef17e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1733891254836 2024-12-11T04:27:36,974 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting dce78cda5f7b423986d53f4679d02ca1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1733891254836 2024-12-11T04:27:36,975 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9c60abba401a416ea48a3b05ad7a8fa6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1733891254969 2024-12-11T04:27:36,975 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting afe96d23b2bc42dc9704570b8bacd872, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1733891254969 2024-12-11T04:27:36,975 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 697ec88bdffe4612b996b6c8510c4e3c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1733891256093 2024-12-11T04:27:36,975 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting e9535959d15b45adab1218d657b1f382, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1733891256093 2024-12-11T04:27:36,984 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d07fdbcaf0fe943be9e071492694f078#A#compaction#276 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:27:36,985 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/e1978185f0ab484da787afcf3eac8d02 is 50, key is test_row_0/A:col10/1733891256096/Put/seqid=0 2024-12-11T04:27:36,985 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d07fdbcaf0fe943be9e071492694f078#B#compaction#277 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:27:36,986 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/e48167d3b37b42efb671c09b19298bf0 is 50, key is test_row_0/B:col10/1733891256096/Put/seqid=0 2024-12-11T04:27:37,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742150_1326 (size=12697) 2024-12-11T04:27:37,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742151_1327 (size=12697) 2024-12-11T04:27:37,011 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/e48167d3b37b42efb671c09b19298bf0 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/e48167d3b37b42efb671c09b19298bf0 2024-12-11T04:27:37,016 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d07fdbcaf0fe943be9e071492694f078/B of d07fdbcaf0fe943be9e071492694f078 into e48167d3b37b42efb671c09b19298bf0(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:27:37,016 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d07fdbcaf0fe943be9e071492694f078: 2024-12-11T04:27:37,016 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078., storeName=d07fdbcaf0fe943be9e071492694f078/B, priority=12, startTime=1733891256971; duration=0sec 2024-12-11T04:27:37,016 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:27:37,016 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d07fdbcaf0fe943be9e071492694f078:B 2024-12-11T04:27:37,017 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T04:27:37,018 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T04:27:37,018 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): d07fdbcaf0fe943be9e071492694f078/C is initiating minor compaction (all files) 2024-12-11T04:27:37,018 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d07fdbcaf0fe943be9e071492694f078/C in TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 
2024-12-11T04:27:37,018 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/a02f87e2a5cc41168b714ab210e708ff, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/ea201e10d857403f898d8937d34d9f1c, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/47a05691df2e4d3dbc8287c7ba987142, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/a9c8821645e24ea38f18bb40e67a2f0b] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp, totalSize=47.9 K 2024-12-11T04:27:37,019 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting a02f87e2a5cc41168b714ab210e708ff, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1733891254222 2024-12-11T04:27:37,019 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting ea201e10d857403f898d8937d34d9f1c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1733891254836 2024-12-11T04:27:37,020 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 47a05691df2e4d3dbc8287c7ba987142, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1733891254969 2024-12-11T04:27:37,020 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting a9c8821645e24ea38f18bb40e67a2f0b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1733891256093 2024-12-11T04:27:37,030 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d07fdbcaf0fe943be9e071492694f078#C#compaction#278 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:27:37,030 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/87ea0f1bda2146bf8f5c07915ef77d68 is 50, key is test_row_0/C:col10/1733891256096/Put/seqid=0 2024-12-11T04:27:37,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742152_1328 (size=12697) 2024-12-11T04:27:37,039 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/87ea0f1bda2146bf8f5c07915ef77d68 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/87ea0f1bda2146bf8f5c07915ef77d68 2024-12-11T04:27:37,043 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d07fdbcaf0fe943be9e071492694f078/C of d07fdbcaf0fe943be9e071492694f078 into 87ea0f1bda2146bf8f5c07915ef77d68(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:27:37,043 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d07fdbcaf0fe943be9e071492694f078: 2024-12-11T04:27:37,043 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078., storeName=d07fdbcaf0fe943be9e071492694f078/C, priority=12, startTime=1733891256971; duration=0sec 2024-12-11T04:27:37,043 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:37,043 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d07fdbcaf0fe943be9e071492694f078:C 2024-12-11T04:27:37,122 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:37,122 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-11T04:27:37,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 
2024-12-11T04:27:37,123 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2837): Flushing d07fdbcaf0fe943be9e071492694f078 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-11T04:27:37,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=A 2024-12-11T04:27:37,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:37,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=B 2024-12-11T04:27:37,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:37,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=C 2024-12-11T04:27:37,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:37,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/51a32bc5fc4f445ba2b4b35c0de8155a is 50, key is test_row_0/A:col10/1733891256114/Put/seqid=0 2024-12-11T04:27:37,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742153_1329 (size=12151) 2024-12-11T04:27:37,133 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=258 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/51a32bc5fc4f445ba2b4b35c0de8155a 2024-12-11T04:27:37,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/26af753268f24d3c9b0b4e78efe5a881 is 50, key is test_row_0/B:col10/1733891256114/Put/seqid=0 2024-12-11T04:27:37,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-11T04:27:37,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742154_1330 (size=12151) 2024-12-11T04:27:37,170 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=258 (bloomFilter=true), 
to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/26af753268f24d3c9b0b4e78efe5a881 2024-12-11T04:27:37,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/ed74b69b03d94582a8504fd38cfd8d5e is 50, key is test_row_0/C:col10/1733891256114/Put/seqid=0 2024-12-11T04:27:37,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742155_1331 (size=12151) 2024-12-11T04:27:37,186 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=258 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/ed74b69b03d94582a8504fd38cfd8d5e 2024-12-11T04:27:37,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/51a32bc5fc4f445ba2b4b35c0de8155a as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/51a32bc5fc4f445ba2b4b35c0de8155a 2024-12-11T04:27:37,195 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/51a32bc5fc4f445ba2b4b35c0de8155a, entries=150, sequenceid=258, filesize=11.9 K 2024-12-11T04:27:37,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/26af753268f24d3c9b0b4e78efe5a881 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/26af753268f24d3c9b0b4e78efe5a881 2024-12-11T04:27:37,200 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/26af753268f24d3c9b0b4e78efe5a881, entries=150, sequenceid=258, filesize=11.9 K 2024-12-11T04:27:37,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/ed74b69b03d94582a8504fd38cfd8d5e as 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/ed74b69b03d94582a8504fd38cfd8d5e 2024-12-11T04:27:37,205 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/ed74b69b03d94582a8504fd38cfd8d5e, entries=150, sequenceid=258, filesize=11.9 K 2024-12-11T04:27:37,207 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=0 B/0 for d07fdbcaf0fe943be9e071492694f078 in 84ms, sequenceid=258, compaction requested=false 2024-12-11T04:27:37,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2538): Flush status journal for d07fdbcaf0fe943be9e071492694f078: 2024-12-11T04:27:37,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:37,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=86 2024-12-11T04:27:37,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=86 2024-12-11T04:27:37,210 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=86, resume processing ppid=85 2024-12-11T04:27:37,210 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, ppid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1560 sec 2024-12-11T04:27:37,212 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees in 1.1600 sec 2024-12-11T04:27:37,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on d07fdbcaf0fe943be9e071492694f078 2024-12-11T04:27:37,239 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d07fdbcaf0fe943be9e071492694f078 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-11T04:27:37,240 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=A 2024-12-11T04:27:37,240 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:37,240 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=B 2024-12-11T04:27:37,240 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:37,240 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=C 2024-12-11T04:27:37,240 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:37,244 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/2132328d46754e02a9a6104c872135f5 is 50, key is test_row_0/A:col10/1733891257238/Put/seqid=0 2024-12-11T04:27:37,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742156_1332 (size=12301) 2024-12-11T04:27:37,258 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=270 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/2132328d46754e02a9a6104c872135f5 2024-12-11T04:27:37,263 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:37,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891317258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:37,265 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/d8e9c0c695fd4d948dbe3544daa2747e is 50, key is test_row_0/B:col10/1733891257238/Put/seqid=0 2024-12-11T04:27:37,266 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:37,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891317261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:37,267 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:37,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891317263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:37,268 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:37,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891317264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:37,268 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:37,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742157_1333 (size=12301) 2024-12-11T04:27:37,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891317265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:37,367 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:37,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891317364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:37,369 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:37,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891317367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:37,370 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:37,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891317369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:37,371 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:37,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891317369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:37,373 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:37,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891317370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:37,407 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/e1978185f0ab484da787afcf3eac8d02 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/e1978185f0ab484da787afcf3eac8d02 2024-12-11T04:27:37,413 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in d07fdbcaf0fe943be9e071492694f078/A of d07fdbcaf0fe943be9e071492694f078 into e1978185f0ab484da787afcf3eac8d02(size=12.4 K), total size for store is 24.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:27:37,413 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d07fdbcaf0fe943be9e071492694f078: 2024-12-11T04:27:37,413 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078., storeName=d07fdbcaf0fe943be9e071492694f078/A, priority=12, startTime=1733891256971; duration=0sec 2024-12-11T04:27:37,413 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:37,413 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d07fdbcaf0fe943be9e071492694f078:A 2024-12-11T04:27:37,570 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:37,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891317569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:37,572 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:37,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891317570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:37,573 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:37,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891317572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:37,575 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:37,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891317573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:37,575 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:37,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891317574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:37,670 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=270 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/d8e9c0c695fd4d948dbe3544daa2747e 2024-12-11T04:27:37,677 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/b466467e180149ea9da2b7fde4f13b72 is 50, key is test_row_0/C:col10/1733891257238/Put/seqid=0 2024-12-11T04:27:37,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742158_1334 (size=12301) 2024-12-11T04:27:37,873 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:37,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891317872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:37,877 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:37,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891317874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:37,877 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:37,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891317876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:37,878 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:37,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891317877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:37,880 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:37,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891317878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:38,082 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=270 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/b466467e180149ea9da2b7fde4f13b72 2024-12-11T04:27:38,087 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/2132328d46754e02a9a6104c872135f5 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/2132328d46754e02a9a6104c872135f5 2024-12-11T04:27:38,094 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/2132328d46754e02a9a6104c872135f5, entries=150, sequenceid=270, filesize=12.0 K 2024-12-11T04:27:38,095 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/d8e9c0c695fd4d948dbe3544daa2747e as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/d8e9c0c695fd4d948dbe3544daa2747e 2024-12-11T04:27:38,098 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/d8e9c0c695fd4d948dbe3544daa2747e, entries=150, sequenceid=270, filesize=12.0 K 2024-12-11T04:27:38,099 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/b466467e180149ea9da2b7fde4f13b72 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/b466467e180149ea9da2b7fde4f13b72 2024-12-11T04:27:38,103 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/b466467e180149ea9da2b7fde4f13b72, entries=150, sequenceid=270, filesize=12.0 K 2024-12-11T04:27:38,104 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for d07fdbcaf0fe943be9e071492694f078 in 865ms, sequenceid=270, compaction requested=true 2024-12-11T04:27:38,104 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d07fdbcaf0fe943be9e071492694f078: 2024-12-11T04:27:38,104 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d07fdbcaf0fe943be9e071492694f078:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:27:38,104 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:38,104 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d07fdbcaf0fe943be9e071492694f078:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:27:38,104 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:27:38,104 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:27:38,104 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d07fdbcaf0fe943be9e071492694f078:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:27:38,105 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-11T04:27:38,105 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:27:38,106 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:27:38,106 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:27:38,106 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): d07fdbcaf0fe943be9e071492694f078/A is initiating minor compaction (all files) 2024-12-11T04:27:38,106 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): d07fdbcaf0fe943be9e071492694f078/B is initiating minor compaction (all files) 2024-12-11T04:27:38,106 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d07fdbcaf0fe943be9e071492694f078/B in TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:38,106 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/e48167d3b37b42efb671c09b19298bf0, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/26af753268f24d3c9b0b4e78efe5a881, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/d8e9c0c695fd4d948dbe3544daa2747e] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp, totalSize=36.3 K 2024-12-11T04:27:38,106 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d07fdbcaf0fe943be9e071492694f078/A in TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 
2024-12-11T04:27:38,106 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/e1978185f0ab484da787afcf3eac8d02, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/51a32bc5fc4f445ba2b4b35c0de8155a, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/2132328d46754e02a9a6104c872135f5] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp, totalSize=36.3 K 2024-12-11T04:27:38,107 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting e48167d3b37b42efb671c09b19298bf0, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1733891256093 2024-12-11T04:27:38,107 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting e1978185f0ab484da787afcf3eac8d02, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1733891256093 2024-12-11T04:27:38,107 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 51a32bc5fc4f445ba2b4b35c0de8155a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1733891256114 2024-12-11T04:27:38,107 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 26af753268f24d3c9b0b4e78efe5a881, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1733891256114 2024-12-11T04:27:38,108 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 2132328d46754e02a9a6104c872135f5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1733891257236 2024-12-11T04:27:38,108 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting d8e9c0c695fd4d948dbe3544daa2747e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1733891257236 2024-12-11T04:27:38,116 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d07fdbcaf0fe943be9e071492694f078#A#compaction#285 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:27:38,117 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d07fdbcaf0fe943be9e071492694f078#B#compaction#286 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:27:38,117 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/e655e20da2154b9c912f52f973629e0b is 50, key is test_row_0/A:col10/1733891257238/Put/seqid=0 2024-12-11T04:27:38,117 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/d5329b325b5344ae9f04c9cfafe3a998 is 50, key is test_row_0/B:col10/1733891257238/Put/seqid=0 2024-12-11T04:27:38,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742160_1336 (size=12949) 2024-12-11T04:27:38,147 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/e655e20da2154b9c912f52f973629e0b as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/e655e20da2154b9c912f52f973629e0b 2024-12-11T04:27:38,152 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d07fdbcaf0fe943be9e071492694f078/A of d07fdbcaf0fe943be9e071492694f078 into e655e20da2154b9c912f52f973629e0b(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T04:27:38,152 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d07fdbcaf0fe943be9e071492694f078: 2024-12-11T04:27:38,152 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078., storeName=d07fdbcaf0fe943be9e071492694f078/A, priority=13, startTime=1733891258104; duration=0sec 2024-12-11T04:27:38,152 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:27:38,152 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d07fdbcaf0fe943be9e071492694f078:A 2024-12-11T04:27:38,152 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:27:38,153 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:27:38,153 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): d07fdbcaf0fe943be9e071492694f078/C is initiating minor compaction (all files) 2024-12-11T04:27:38,154 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d07fdbcaf0fe943be9e071492694f078/C in TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:38,154 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/87ea0f1bda2146bf8f5c07915ef77d68, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/ed74b69b03d94582a8504fd38cfd8d5e, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/b466467e180149ea9da2b7fde4f13b72] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp, totalSize=36.3 K 2024-12-11T04:27:38,154 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 87ea0f1bda2146bf8f5c07915ef77d68, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1733891256093 2024-12-11T04:27:38,155 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting ed74b69b03d94582a8504fd38cfd8d5e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1733891256114 2024-12-11T04:27:38,156 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting b466467e180149ea9da2b7fde4f13b72, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1733891257236 2024-12-11T04:27:38,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking 
to see if procedure is done pid=85 2024-12-11T04:27:38,156 INFO [Thread-1267 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 85 completed 2024-12-11T04:27:38,157 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:27:38,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742159_1335 (size=12949) 2024-12-11T04:27:38,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees 2024-12-11T04:27:38,160 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:27:38,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-11T04:27:38,161 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:27:38,161 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:27:38,164 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/d5329b325b5344ae9f04c9cfafe3a998 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/d5329b325b5344ae9f04c9cfafe3a998 2024-12-11T04:27:38,168 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d07fdbcaf0fe943be9e071492694f078#C#compaction#287 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:27:38,169 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/dd90aa33a98d4e9aa3e4c06d53b95cf6 is 50, key is test_row_0/C:col10/1733891257238/Put/seqid=0 2024-12-11T04:27:38,174 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d07fdbcaf0fe943be9e071492694f078/B of d07fdbcaf0fe943be9e071492694f078 into d5329b325b5344ae9f04c9cfafe3a998(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T04:27:38,174 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d07fdbcaf0fe943be9e071492694f078: 2024-12-11T04:27:38,175 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078., storeName=d07fdbcaf0fe943be9e071492694f078/B, priority=13, startTime=1733891258104; duration=0sec 2024-12-11T04:27:38,175 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:38,175 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d07fdbcaf0fe943be9e071492694f078:B 2024-12-11T04:27:38,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742161_1337 (size=12949) 2024-12-11T04:27:38,206 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/dd90aa33a98d4e9aa3e4c06d53b95cf6 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/dd90aa33a98d4e9aa3e4c06d53b95cf6 2024-12-11T04:27:38,211 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d07fdbcaf0fe943be9e071492694f078/C of d07fdbcaf0fe943be9e071492694f078 into dd90aa33a98d4e9aa3e4c06d53b95cf6(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T04:27:38,211 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d07fdbcaf0fe943be9e071492694f078: 2024-12-11T04:27:38,211 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078., storeName=d07fdbcaf0fe943be9e071492694f078/C, priority=13, startTime=1733891258104; duration=0sec 2024-12-11T04:27:38,211 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:38,211 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d07fdbcaf0fe943be9e071492694f078:C 2024-12-11T04:27:38,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-11T04:27:38,313 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:38,314 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-11T04:27:38,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:38,314 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2837): Flushing d07fdbcaf0fe943be9e071492694f078 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-11T04:27:38,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=A 2024-12-11T04:27:38,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:38,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=B 2024-12-11T04:27:38,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:38,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=C 2024-12-11T04:27:38,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:38,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/8b8b45d4489a456ab0626013b14625ac is 50, key is test_row_0/A:col10/1733891257263/Put/seqid=0 2024-12-11T04:27:38,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742162_1338 (size=12301) 2024-12-11T04:27:38,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on d07fdbcaf0fe943be9e071492694f078 2024-12-11T04:27:38,380 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. as already flushing 2024-12-11T04:27:38,386 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:38,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891318384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:38,386 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:38,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891318384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:38,388 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:38,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891318385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:38,388 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:38,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891318386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:38,388 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:38,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891318386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:38,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-11T04:27:38,489 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:38,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891318487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:38,490 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:38,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891318489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:38,490 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:38,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891318489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:38,491 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:38,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891318489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:38,691 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:38,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891318690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:38,692 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:38,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891318691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:38,692 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:38,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891318692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:38,693 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:38,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891318692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:38,734 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=299 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/8b8b45d4489a456ab0626013b14625ac 2024-12-11T04:27:38,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/ec8890b724c94cb888bbdf0a4aff8343 is 50, key is test_row_0/B:col10/1733891257263/Put/seqid=0 2024-12-11T04:27:38,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742163_1339 (size=12301) 2024-12-11T04:27:38,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-11T04:27:38,995 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:38,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891318994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:38,995 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:38,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891318995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:38,996 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:38,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891318995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:38,996 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:38,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891318995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:39,156 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=299 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/ec8890b724c94cb888bbdf0a4aff8343 2024-12-11T04:27:39,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/242adfe0802d4f05a745dc514e5f93ee is 50, key is test_row_0/C:col10/1733891257263/Put/seqid=0 2024-12-11T04:27:39,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742164_1340 (size=12301) 2024-12-11T04:27:39,191 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=299 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/242adfe0802d4f05a745dc514e5f93ee 2024-12-11T04:27:39,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/8b8b45d4489a456ab0626013b14625ac as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/8b8b45d4489a456ab0626013b14625ac 2024-12-11T04:27:39,201 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/8b8b45d4489a456ab0626013b14625ac, entries=150, sequenceid=299, filesize=12.0 K 2024-12-11T04:27:39,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/ec8890b724c94cb888bbdf0a4aff8343 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/ec8890b724c94cb888bbdf0a4aff8343 2024-12-11T04:27:39,207 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/ec8890b724c94cb888bbdf0a4aff8343, entries=150, sequenceid=299, filesize=12.0 K 2024-12-11T04:27:39,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/242adfe0802d4f05a745dc514e5f93ee as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/242adfe0802d4f05a745dc514e5f93ee 2024-12-11T04:27:39,214 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/242adfe0802d4f05a745dc514e5f93ee, entries=150, sequenceid=299, filesize=12.0 K 2024-12-11T04:27:39,216 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for d07fdbcaf0fe943be9e071492694f078 in 902ms, sequenceid=299, compaction requested=false 2024-12-11T04:27:39,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2538): Flush status journal for d07fdbcaf0fe943be9e071492694f078: 2024-12-11T04:27:39,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 
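Annotation: the flush just logged for region d07fdbcaf0fe943be9e071492694f078 (FlushRegionProcedure pid=88, part of the client-requested FlushTableProcedure pid=87) finishes in the next entries, and another client flush request (pid=89) follows immediately. A minimal sketch of issuing that kind of request through the public Admin API, assuming only a reachable cluster configuration and using the table name taken from the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();  // cluster settings assumed to be on the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Asks the master to flush every region of the table; the master runs a
            // FlushTableProcedure and dispatches FlushRegionProcedure subprocedures
            // to the owning region servers, as seen in the pid=87/88 and pid=89/90 entries.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}

The client.HBaseAdmin$TableFuture entry below ("Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 87 completed") is the client side of such a call returning.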
2024-12-11T04:27:39,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=88 2024-12-11T04:27:39,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=88 2024-12-11T04:27:39,218 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=87 2024-12-11T04:27:39,218 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0560 sec 2024-12-11T04:27:39,220 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees in 1.0620 sec 2024-12-11T04:27:39,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-11T04:27:39,264 INFO [Thread-1267 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 87 completed 2024-12-11T04:27:39,265 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:27:39,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees 2024-12-11T04:27:39,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-11T04:27:39,267 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:27:39,268 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:27:39,268 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:27:39,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-11T04:27:39,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on d07fdbcaf0fe943be9e071492694f078 2024-12-11T04:27:39,394 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d07fdbcaf0fe943be9e071492694f078 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-11T04:27:39,395 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=A 2024-12-11T04:27:39,395 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:39,395 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, 
store=B 2024-12-11T04:27:39,395 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:39,395 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=C 2024-12-11T04:27:39,395 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:39,399 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/c7ff829234fc4b2a9094df3b037ecd50 is 50, key is test_row_0/A:col10/1733891258383/Put/seqid=0 2024-12-11T04:27:39,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742165_1341 (size=12301) 2024-12-11T04:27:39,405 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/c7ff829234fc4b2a9094df3b037ecd50 2024-12-11T04:27:39,412 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/981d584e30e140199fda5bb99b623085 is 50, key is test_row_0/B:col10/1733891258383/Put/seqid=0 2024-12-11T04:27:39,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742166_1342 (size=12301) 2024-12-11T04:27:39,423 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:39,423 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-12-11T04:27:39,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:39,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. as already flushing 2024-12-11T04:27:39,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:39,423 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:39,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:39,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:39,464 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:39,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891319463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:39,500 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:39,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891319498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:39,500 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:39,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891319498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:39,501 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:39,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891319498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:39,503 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:39,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891319501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:39,566 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:39,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891319565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:39,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-11T04:27:39,575 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:39,576 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-12-11T04:27:39,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:39,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. as already flushing 2024-12-11T04:27:39,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:39,576 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
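Annotation: the RegionTooBusyException entries repeated through this stretch come from HRegion.checkResources rejecting Mutate calls while the region's memstore is over its blocking limit (512.0 K here); the writer is expected to back off and retry once the in-progress flush drains the memstore. A client-side sketch of that pattern, with hypothetical backoff values and the row/column layout (test_row_0, family A, qualifier col10) taken from the log; depending on client retry settings the exception can also surface wrapped in a RetriesExhaustedWithDetailsException rather than directly:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithBackoff {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;                       // hypothetical starting backoff
            for (int attempt = 0; attempt < 5; attempt++) {
                try {
                    table.put(put);
                    break;                              // write accepted
                } catch (RegionTooBusyException e) {
                    // Region is over its memstore blocking limit; wait for the
                    // flush in progress to drain it, then try again.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}

The pattern of the same connections (e.g. 172.17.0.2:53898) reappearing above with new callIds and later deadlines is consistent with clients retrying in exactly this way.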
2024-12-11T04:27:39,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:39,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:39,728 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:39,729 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-12-11T04:27:39,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:39,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. as already flushing 2024-12-11T04:27:39,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:39,729 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:39,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:39,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:39,771 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:39,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891319768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:39,818 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/981d584e30e140199fda5bb99b623085 2024-12-11T04:27:39,832 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/a0943931a1e04647a6418939ba222f9e is 50, key is test_row_0/C:col10/1733891258383/Put/seqid=0 2024-12-11T04:27:39,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742167_1343 (size=12301) 2024-12-11T04:27:39,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-11T04:27:39,881 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:39,882 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-12-11T04:27:39,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:39,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. as already flushing 2024-12-11T04:27:39,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 
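Annotation: the 512.0 K figure in the "Over memstore limit" messages is the region's blocking memstore size, i.e. the configured per-region flush size multiplied by hbase.hregion.memstore.block.multiplier; once the memstore passes it, checkResources rejects writes until a flush catches up. A configuration sketch with illustrative values (assumptions only; this excerpt does not show the test's actual settings):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitConfig {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Illustrative values, not the test's actual settings: a flush is requested
        // once a region's memstore reaches the flush size, and writes are blocked
        // once it reaches flush size * block multiplier.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("blocking limit (bytes): " + blockingLimit);
    }
}

With production defaults this limit is hundreds of megabytes; the 512 K seen here suggests the test deliberately shrinks the flush size to force frequent flushes and memstore back-pressure.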
2024-12-11T04:27:39,882 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:39,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:39,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:40,035 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:40,036 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-12-11T04:27:40,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:40,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. as already flushing 2024-12-11T04:27:40,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:40,036 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:40,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:40,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:40,074 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:40,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891320073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:40,189 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:40,189 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-12-11T04:27:40,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:40,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. as already flushing 2024-12-11T04:27:40,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:40,189 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:27:40,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:40,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-11T04:27:40,237 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/a0943931a1e04647a6418939ba222f9e
2024-12-11T04:27:40,242 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/c7ff829234fc4b2a9094df3b037ecd50 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/c7ff829234fc4b2a9094df3b037ecd50
2024-12-11T04:27:40,246 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/c7ff829234fc4b2a9094df3b037ecd50, entries=150, sequenceid=311, filesize=12.0 K
2024-12-11T04:27:40,247 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/981d584e30e140199fda5bb99b623085 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/981d584e30e140199fda5bb99b623085
2024-12-11T04:27:40,250 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/981d584e30e140199fda5bb99b623085, entries=150, sequenceid=311, filesize=12.0 K
2024-12-11T04:27:40,251 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/a0943931a1e04647a6418939ba222f9e as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/a0943931a1e04647a6418939ba222f9e
2024-12-11T04:27:40,254 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/a0943931a1e04647a6418939ba222f9e, entries=150, sequenceid=311, filesize=12.0 K
2024-12-11T04:27:40,255 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for d07fdbcaf0fe943be9e071492694f078 in 861ms, sequenceid=311, compaction requested=true
2024-12-11T04:27:40,255 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d07fdbcaf0fe943be9e071492694f078:
2024-12-11T04:27:40,255 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d07fdbcaf0fe943be9e071492694f078:A, priority=-2147483648, current under compaction store size is 1
2024-12-11T04:27:40,255 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-11T04:27:40,255 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d07fdbcaf0fe943be9e071492694f078:B, priority=-2147483648, current under compaction store size is 2
2024-12-11T04:27:40,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-11T04:27:40,256 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-11T04:27:40,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d07fdbcaf0fe943be9e071492694f078:C, priority=-2147483648, current under compaction store size is 3
2024-12-11T04:27:40,256 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-11T04:27:40,256 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-11T04:27:40,256 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-11T04:27:40,256 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-11T04:27:40,257 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): d07fdbcaf0fe943be9e071492694f078/B is initiating minor compaction (all files)
2024-12-11T04:27:40,257 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): d07fdbcaf0fe943be9e071492694f078/A is initiating minor compaction (all files)
2024-12-11T04:27:40,257 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d07fdbcaf0fe943be9e071492694f078/B in TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.
2024-12-11T04:27:40,257 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d07fdbcaf0fe943be9e071492694f078/A in TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.
2024-12-11T04:27:40,257 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/d5329b325b5344ae9f04c9cfafe3a998, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/ec8890b724c94cb888bbdf0a4aff8343, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/981d584e30e140199fda5bb99b623085] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp, totalSize=36.7 K
2024-12-11T04:27:40,257 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/e655e20da2154b9c912f52f973629e0b, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/8b8b45d4489a456ab0626013b14625ac, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/c7ff829234fc4b2a9094df3b037ecd50] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp, totalSize=36.7 K
2024-12-11T04:27:40,257 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting d5329b325b5344ae9f04c9cfafe3a998, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1733891257236
2024-12-11T04:27:40,257 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting e655e20da2154b9c912f52f973629e0b, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1733891257236
2024-12-11T04:27:40,258 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting ec8890b724c94cb888bbdf0a4aff8343, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1733891257260
2024-12-11T04:27:40,258 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8b8b45d4489a456ab0626013b14625ac, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1733891257260
2024-12-11T04:27:40,258 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 981d584e30e140199fda5bb99b623085, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1733891258383
2024-12-11T04:27:40,258 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting c7ff829234fc4b2a9094df3b037ecd50, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1733891258383
2024-12-11T04:27:40,265 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d07fdbcaf0fe943be9e071492694f078#B#compaction#294 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-11T04:27:40,266 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/300053bf126c429590def4ed0e6ece67 is 50, key is test_row_0/B:col10/1733891258383/Put/seqid=0
2024-12-11T04:27:40,266 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): d07fdbcaf0fe943be9e071492694f078#A#compaction#295 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-11T04:27:40,267 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/bfa313d0bfd64da49b6d521c1dec3638 is 50, key is test_row_0/A:col10/1733891258383/Put/seqid=0
2024-12-11T04:27:40,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742168_1344 (size=13051)
2024-12-11T04:27:40,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742169_1345 (size=13051)
2024-12-11T04:27:40,341 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267
2024-12-11T04:27:40,342 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90
2024-12-11T04:27:40,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.
2024-12-11T04:27:40,342 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2837): Flushing d07fdbcaf0fe943be9e071492694f078 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-11T04:27:40,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=A 2024-12-11T04:27:40,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:40,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=B 2024-12-11T04:27:40,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:40,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=C 2024-12-11T04:27:40,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:40,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/d694b42d7e2b4a58ab53df1a34dbdc6d is 50, key is test_row_0/A:col10/1733891259452/Put/seqid=0 2024-12-11T04:27:40,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742170_1346 (size=12301) 2024-12-11T04:27:40,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-11T04:27:40,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on d07fdbcaf0fe943be9e071492694f078 2024-12-11T04:27:40,504 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. as already flushing 2024-12-11T04:27:40,515 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:40,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891320512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:40,516 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:40,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891320512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:40,517 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:40,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891320515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:40,519 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:40,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891320515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:40,580 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:40,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891320579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:40,618 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:40,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891320617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:40,619 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:40,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891320617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:40,620 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:40,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891320618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:40,622 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:40,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891320620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:40,680 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/300053bf126c429590def4ed0e6ece67 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/300053bf126c429590def4ed0e6ece67 2024-12-11T04:27:40,682 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/bfa313d0bfd64da49b6d521c1dec3638 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/bfa313d0bfd64da49b6d521c1dec3638 2024-12-11T04:27:40,687 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d07fdbcaf0fe943be9e071492694f078/B of d07fdbcaf0fe943be9e071492694f078 into 300053bf126c429590def4ed0e6ece67(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T04:27:40,687 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d07fdbcaf0fe943be9e071492694f078: 2024-12-11T04:27:40,687 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078., storeName=d07fdbcaf0fe943be9e071492694f078/B, priority=13, startTime=1733891260255; duration=0sec 2024-12-11T04:27:40,687 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:27:40,687 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d07fdbcaf0fe943be9e071492694f078:B 2024-12-11T04:27:40,687 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:27:40,687 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d07fdbcaf0fe943be9e071492694f078/A of d07fdbcaf0fe943be9e071492694f078 into bfa313d0bfd64da49b6d521c1dec3638(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:27:40,687 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d07fdbcaf0fe943be9e071492694f078: 2024-12-11T04:27:40,687 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078., storeName=d07fdbcaf0fe943be9e071492694f078/A, priority=13, startTime=1733891260255; duration=0sec 2024-12-11T04:27:40,687 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:40,687 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d07fdbcaf0fe943be9e071492694f078:A 2024-12-11T04:27:40,688 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:27:40,689 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): d07fdbcaf0fe943be9e071492694f078/C is initiating minor compaction (all files) 2024-12-11T04:27:40,689 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of d07fdbcaf0fe943be9e071492694f078/C in TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 
2024-12-11T04:27:40,689 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/dd90aa33a98d4e9aa3e4c06d53b95cf6, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/242adfe0802d4f05a745dc514e5f93ee, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/a0943931a1e04647a6418939ba222f9e] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp, totalSize=36.7 K
2024-12-11T04:27:40,689 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting dd90aa33a98d4e9aa3e4c06d53b95cf6, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1733891257236
2024-12-11T04:27:40,690 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 242adfe0802d4f05a745dc514e5f93ee, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1733891257260
2024-12-11T04:27:40,690 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting a0943931a1e04647a6418939ba222f9e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1733891258383
2024-12-11T04:27:40,696 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): d07fdbcaf0fe943be9e071492694f078#C#compaction#297 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-11T04:27:40,697 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/195e5b99ea3646e0aaff4daaf8e48602 is 50, key is test_row_0/C:col10/1733891258383/Put/seqid=0
2024-12-11T04:27:40,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742171_1347 (size=13051)
2024-12-11T04:27:40,751 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/d694b42d7e2b4a58ab53df1a34dbdc6d
2024-12-11T04:27:40,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/b5743d27b1364174a6520bc08cfebe78 is 50, key is test_row_0/B:col10/1733891259452/Put/seqid=0
2024-12-11T04:27:40,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742172_1348 (size=12301)
2024-12-11T04:27:40,822 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:40,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891320820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:40,822 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:40,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891320820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:40,822 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:40,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891320821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:40,825 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:40,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891320823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:41,115 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/195e5b99ea3646e0aaff4daaf8e48602 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/195e5b99ea3646e0aaff4daaf8e48602 2024-12-11T04:27:41,120 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in d07fdbcaf0fe943be9e071492694f078/C of d07fdbcaf0fe943be9e071492694f078 into 195e5b99ea3646e0aaff4daaf8e48602(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:27:41,120 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for d07fdbcaf0fe943be9e071492694f078: 2024-12-11T04:27:41,120 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078., storeName=d07fdbcaf0fe943be9e071492694f078/C, priority=13, startTime=1733891260256; duration=0sec 2024-12-11T04:27:41,120 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:41,120 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d07fdbcaf0fe943be9e071492694f078:C 2024-12-11T04:27:41,124 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:41,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891321124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:41,125 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:41,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891321124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:41,126 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:41,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891321125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:41,127 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:41,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891321127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:41,164 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/b5743d27b1364174a6520bc08cfebe78 2024-12-11T04:27:41,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/1b7d0fc636fd4bc8950af70c1b600207 is 50, key is test_row_0/C:col10/1733891259452/Put/seqid=0 2024-12-11T04:27:41,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742173_1349 (size=12301) 2024-12-11T04:27:41,199 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/1b7d0fc636fd4bc8950af70c1b600207 2024-12-11T04:27:41,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/d694b42d7e2b4a58ab53df1a34dbdc6d as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/d694b42d7e2b4a58ab53df1a34dbdc6d 2024-12-11T04:27:41,208 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/d694b42d7e2b4a58ab53df1a34dbdc6d, entries=150, sequenceid=335, filesize=12.0 K 2024-12-11T04:27:41,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/b5743d27b1364174a6520bc08cfebe78 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/b5743d27b1364174a6520bc08cfebe78 2024-12-11T04:27:41,214 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/b5743d27b1364174a6520bc08cfebe78, entries=150, sequenceid=335, filesize=12.0 K 2024-12-11T04:27:41,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/1b7d0fc636fd4bc8950af70c1b600207 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/1b7d0fc636fd4bc8950af70c1b600207 2024-12-11T04:27:41,221 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/1b7d0fc636fd4bc8950af70c1b600207, entries=150, sequenceid=335, filesize=12.0 K 2024-12-11T04:27:41,222 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for d07fdbcaf0fe943be9e071492694f078 in 880ms, sequenceid=335, compaction requested=false 2024-12-11T04:27:41,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2538): Flush status journal for d07fdbcaf0fe943be9e071492694f078: 2024-12-11T04:27:41,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 
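The mutations being rejected above are ordinary single-row puts against TestAcidGuarantees that arrive while the region's memstore is over its 512.0 K blocking limit. A minimal client-side sketch of such a write, assuming a standard HBase 2.x client on the classpath; the table, row, family and qualifier names are taken from the log keys (test_row_0/A:col10), while the value written and the error handling are illustrative assumptions, not part of the test:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class TestRowPut {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Same key shape as the log entries: row test_row_0, family A, qualifier col10.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      try {
        table.put(put);
      } catch (IOException e) {
        // RegionTooBusyException is retried inside the client
        // (hbase.client.retries.number / hbase.client.pause) and only
        // surfaces here once those retries are exhausted.
        System.err.println("Put failed after client retries: " + e);
      }
    }
  }
}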
2024-12-11T04:27:41,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=90 2024-12-11T04:27:41,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=90 2024-12-11T04:27:41,226 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-12-11T04:27:41,226 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9550 sec 2024-12-11T04:27:41,227 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees in 1.9610 sec 2024-12-11T04:27:41,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-11T04:27:41,371 INFO [Thread-1267 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 89 completed 2024-12-11T04:27:41,372 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:27:41,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=91, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees 2024-12-11T04:27:41,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-11T04:27:41,374 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=91, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:27:41,375 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=91, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:27:41,375 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=92, ppid=91, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:27:41,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-11T04:27:41,527 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:41,527 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-12-11T04:27:41,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 
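The flush cycle recorded here (FlushTableProcedure pid=91 fanning out FlushRegionProcedure pid=92 to 5f466b3719ec,39071) is driven by an external flush request from the jenkins client. A minimal sketch of how such a table flush is issued, assuming a standard HBase 2.x Admin client; the class name and configuration source are illustrative assumptions:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestAcidGuarantees {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // The master turns this into a FlushTableProcedure with one
      // FlushRegionProcedure subprocedure per region, and the call returns
      // once the procedure finishes ("Operation: FLUSH ... completed" above).
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}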
2024-12-11T04:27:41,527 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2837): Flushing d07fdbcaf0fe943be9e071492694f078 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-11T04:27:41,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=A 2024-12-11T04:27:41,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:41,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=B 2024-12-11T04:27:41,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:41,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=C 2024-12-11T04:27:41,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:41,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/241909a43936433caf35344927c44e61 is 50, key is test_row_0/A:col10/1733891260514/Put/seqid=0 2024-12-11T04:27:41,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742174_1350 (size=9857) 2024-12-11T04:27:41,538 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=351 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/241909a43936433caf35344927c44e61 2024-12-11T04:27:41,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/5843c9531c224a8d914b7051adae5c52 is 50, key is test_row_0/B:col10/1733891260514/Put/seqid=0 2024-12-11T04:27:41,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742175_1351 (size=9857) 2024-12-11T04:27:41,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on d07fdbcaf0fe943be9e071492694f078 2024-12-11T04:27:41,583 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 
as already flushing 2024-12-11T04:27:41,624 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:41,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891321622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:41,627 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:41,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891321626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:41,629 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:41,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891321629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:41,629 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:41,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891321629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:41,633 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:41,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891321632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:41,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-11T04:27:41,727 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:41,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891321725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:41,929 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:41,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891321928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:41,949 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=351 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/5843c9531c224a8d914b7051adae5c52 2024-12-11T04:27:41,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-11T04:27:41,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/d76822c13107449b8bcab21bab74f298 is 50, key is test_row_0/C:col10/1733891260514/Put/seqid=0 2024-12-11T04:27:41,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742176_1352 (size=9857) 2024-12-11T04:27:42,233 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:42,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891322231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:42,396 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=351 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/d76822c13107449b8bcab21bab74f298 2024-12-11T04:27:42,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/241909a43936433caf35344927c44e61 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/241909a43936433caf35344927c44e61 2024-12-11T04:27:42,406 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/241909a43936433caf35344927c44e61, entries=100, sequenceid=351, filesize=9.6 K 2024-12-11T04:27:42,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/5843c9531c224a8d914b7051adae5c52 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/5843c9531c224a8d914b7051adae5c52 2024-12-11T04:27:42,411 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/5843c9531c224a8d914b7051adae5c52, entries=100, sequenceid=351, filesize=9.6 K 2024-12-11T04:27:42,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/d76822c13107449b8bcab21bab74f298 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/d76822c13107449b8bcab21bab74f298 2024-12-11T04:27:42,416 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/d76822c13107449b8bcab21bab74f298, entries=100, sequenceid=351, filesize=9.6 K 2024-12-11T04:27:42,417 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for d07fdbcaf0fe943be9e071492694f078 in 890ms, sequenceid=351, compaction requested=true 2024-12-11T04:27:42,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2538): Flush status journal for d07fdbcaf0fe943be9e071492694f078: 2024-12-11T04:27:42,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:42,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=92 2024-12-11T04:27:42,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=92 2024-12-11T04:27:42,420 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=92, resume processing ppid=91 2024-12-11T04:27:42,420 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, ppid=91, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0430 sec 2024-12-11T04:27:42,422 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees in 1.0490 sec 2024-12-11T04:27:42,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-11T04:27:42,479 INFO [Thread-1267 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 91 completed 2024-12-11T04:27:42,480 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:27:42,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=93, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=93, table=TestAcidGuarantees 2024-12-11T04:27:42,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-11T04:27:42,482 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=93, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=93, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:27:42,483 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=93, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=93, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:27:42,483 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=94, ppid=93, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:27:42,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-11T04:27:42,634 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:42,635 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=94 2024-12-11T04:27:42,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:42,635 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2837): Flushing d07fdbcaf0fe943be9e071492694f078 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-11T04:27:42,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on d07fdbcaf0fe943be9e071492694f078 2024-12-11T04:27:42,636 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 
as already flushing 2024-12-11T04:27:42,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=A 2024-12-11T04:27:42,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:42,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=B 2024-12-11T04:27:42,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:42,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=C 2024-12-11T04:27:42,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:42,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/bac846e2a8c54c0d982043a5c318ea5b is 50, key is test_row_0/A:col10/1733891262635/Put/seqid=0 2024-12-11T04:27:42,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742177_1353 (size=12301) 2024-12-11T04:27:42,679 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:42,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891322676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:42,679 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:42,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891322676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:42,679 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:42,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891322677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:42,679 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:42,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891322677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:42,738 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:42,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891322736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:42,781 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:42,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891322780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:42,781 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:42,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891322780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:42,782 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:42,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891322780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:42,783 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:42,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891322780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:42,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-11T04:27:42,983 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:42,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891322983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:42,986 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:42,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891322984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:42,986 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:42,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891322984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:42,987 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:42,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891322985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:43,052 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=375 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/bac846e2a8c54c0d982043a5c318ea5b 2024-12-11T04:27:43,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/558d5232a0404a3abb797e648b4a06d4 is 50, key is test_row_0/B:col10/1733891262635/Put/seqid=0 2024-12-11T04:27:43,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742178_1354 (size=12301) 2024-12-11T04:27:43,063 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=375 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/558d5232a0404a3abb797e648b4a06d4 2024-12-11T04:27:43,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/8b3bc77628e047a8b3caf76aacbf6f99 is 50, key is 
test_row_0/C:col10/1733891262635/Put/seqid=0 2024-12-11T04:27:43,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742179_1355 (size=12301) 2024-12-11T04:27:43,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-11T04:27:43,288 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:43,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891323286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:43,289 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:43,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891323288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:43,290 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:43,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891323288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:43,292 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:43,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891323289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:43,475 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=375 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/8b3bc77628e047a8b3caf76aacbf6f99 2024-12-11T04:27:43,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/bac846e2a8c54c0d982043a5c318ea5b as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/bac846e2a8c54c0d982043a5c318ea5b 2024-12-11T04:27:43,483 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/bac846e2a8c54c0d982043a5c318ea5b, entries=150, sequenceid=375, filesize=12.0 K 2024-12-11T04:27:43,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/558d5232a0404a3abb797e648b4a06d4 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/558d5232a0404a3abb797e648b4a06d4 2024-12-11T04:27:43,488 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/558d5232a0404a3abb797e648b4a06d4, entries=150, sequenceid=375, filesize=12.0 K 2024-12-11T04:27:43,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/8b3bc77628e047a8b3caf76aacbf6f99 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/8b3bc77628e047a8b3caf76aacbf6f99 2024-12-11T04:27:43,493 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/8b3bc77628e047a8b3caf76aacbf6f99, entries=150, sequenceid=375, filesize=12.0 K 2024-12-11T04:27:43,494 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for d07fdbcaf0fe943be9e071492694f078 in 859ms, sequenceid=375, compaction requested=true 2024-12-11T04:27:43,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.HRegion(2538): Flush status journal for d07fdbcaf0fe943be9e071492694f078: 2024-12-11T04:27:43,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 
2024-12-11T04:27:43,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=94}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=94 2024-12-11T04:27:43,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=94 2024-12-11T04:27:43,497 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=94, resume processing ppid=93 2024-12-11T04:27:43,497 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, ppid=93, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0130 sec 2024-12-11T04:27:43,499 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=93, table=TestAcidGuarantees in 1.0170 sec 2024-12-11T04:27:43,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-12-11T04:27:43,585 INFO [Thread-1267 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 93 completed 2024-12-11T04:27:43,586 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:27:43,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=95, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=95, table=TestAcidGuarantees 2024-12-11T04:27:43,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-12-11T04:27:43,588 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=95, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=95, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:27:43,588 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=95, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=95, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:27:43,588 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=96, ppid=95, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:27:43,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-12-11T04:27:43,740 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:43,740 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=96 2024-12-11T04:27:43,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 
2024-12-11T04:27:43,741 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HRegion(2837): Flushing d07fdbcaf0fe943be9e071492694f078 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-11T04:27:43,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=A 2024-12-11T04:27:43,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:43,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=B 2024-12-11T04:27:43,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:43,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=C 2024-12-11T04:27:43,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:43,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on d07fdbcaf0fe943be9e071492694f078 2024-12-11T04:27:43,745 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 
as already flushing 2024-12-11T04:27:43,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/d3ed69d6c6ff431aa90249214d220a66 is 50, key is test_row_0/A:col10/1733891262676/Put/seqid=0 2024-12-11T04:27:43,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742180_1356 (size=12301) 2024-12-11T04:27:43,756 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=387 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/d3ed69d6c6ff431aa90249214d220a66 2024-12-11T04:27:43,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/87cb2adfdf2649bfb3a4468c0b753225 is 50, key is test_row_0/B:col10/1733891262676/Put/seqid=0 2024-12-11T04:27:43,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742181_1357 (size=12301) 2024-12-11T04:27:43,800 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:43,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891323797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:43,803 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:43,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891323798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:43,803 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:43,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891323798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:43,803 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:43,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891323799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:43,803 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:43,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891323799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:43,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-12-11T04:27:43,902 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:43,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891323901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:43,905 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:43,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891323904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:43,905 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:43,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891323904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:43,905 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:43,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891323904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:43,906 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:43,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891323904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:44,107 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:44,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891324104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:44,108 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:44,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891324106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:44,109 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:44,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891324107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:44,109 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:44,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891324107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:44,110 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:44,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891324107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:44,170 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=387 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/87cb2adfdf2649bfb3a4468c0b753225 2024-12-11T04:27:44,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/eb08487fd41747839ad5a0767207b8b8 is 50, key is test_row_0/C:col10/1733891262676/Put/seqid=0 2024-12-11T04:27:44,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742182_1358 (size=12301) 2024-12-11T04:27:44,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-12-11T04:27:44,410 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:44,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891324409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:44,413 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:44,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891324410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:44,414 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:44,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891324412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:44,414 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:44,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891324412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:44,415 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:44,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891324412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:44,582 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=387 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/eb08487fd41747839ad5a0767207b8b8 2024-12-11T04:27:44,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/d3ed69d6c6ff431aa90249214d220a66 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/d3ed69d6c6ff431aa90249214d220a66 2024-12-11T04:27:44,596 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/d3ed69d6c6ff431aa90249214d220a66, entries=150, sequenceid=387, filesize=12.0 K 2024-12-11T04:27:44,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/87cb2adfdf2649bfb3a4468c0b753225 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/87cb2adfdf2649bfb3a4468c0b753225 2024-12-11T04:27:44,600 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/87cb2adfdf2649bfb3a4468c0b753225, entries=150, sequenceid=387, filesize=12.0 K 2024-12-11T04:27:44,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/eb08487fd41747839ad5a0767207b8b8 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/eb08487fd41747839ad5a0767207b8b8 2024-12-11T04:27:44,604 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/eb08487fd41747839ad5a0767207b8b8, entries=150, sequenceid=387, filesize=12.0 K 2024-12-11T04:27:44,605 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for d07fdbcaf0fe943be9e071492694f078 in 864ms, sequenceid=387, compaction requested=true 2024-12-11T04:27:44,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.HRegion(2538): Flush status journal for d07fdbcaf0fe943be9e071492694f078: 2024-12-11T04:27:44,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:44,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=96}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=96 2024-12-11T04:27:44,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=96 2024-12-11T04:27:44,608 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=96, resume processing ppid=95 2024-12-11T04:27:44,608 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, ppid=95, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0190 sec 2024-12-11T04:27:44,610 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=95, table=TestAcidGuarantees in 1.0230 sec 2024-12-11T04:27:44,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-12-11T04:27:44,691 INFO [Thread-1267 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 95 completed 2024-12-11T04:27:44,693 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:27:44,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=97, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=97, table=TestAcidGuarantees 2024-12-11T04:27:44,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-12-11T04:27:44,694 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=97, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=97, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:27:44,695 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=97, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=97, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:27:44,695 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=98, ppid=97, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:27:44,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-12-11T04:27:44,847 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:44,848 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=98 2024-12-11T04:27:44,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:44,848 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.HRegion(2837): Flushing d07fdbcaf0fe943be9e071492694f078 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-11T04:27:44,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=A 2024-12-11T04:27:44,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:44,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=B 2024-12-11T04:27:44,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:44,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=C 2024-12-11T04:27:44,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:44,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/8278c92fbd75461eaf0cdeab6f8cdf62 is 50, key is test_row_0/A:col10/1733891263797/Put/seqid=0 2024-12-11T04:27:44,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742183_1359 (size=12301) 
2024-12-11T04:27:44,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on d07fdbcaf0fe943be9e071492694f078 2024-12-11T04:27:44,917 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. as already flushing 2024-12-11T04:27:44,930 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:44,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891324925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:44,930 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:44,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891324926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:44,931 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:44,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891324926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:44,931 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:44,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891324926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:44,931 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:44,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891324927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:44,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-12-11T04:27:45,033 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:45,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891325031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:45,033 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:45,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891325031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:45,033 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:45,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891325031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:45,034 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:45,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891325032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:45,034 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:45,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891325032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:45,236 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:45,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891325234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:45,237 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:45,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891325234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:45,237 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:45,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891325234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:45,237 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:45,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891325235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:45,238 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:45,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891325236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:45,258 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/8278c92fbd75461eaf0cdeab6f8cdf62 2024-12-11T04:27:45,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/5f35c362476d4b08ada49ded527ca3d6 is 50, key is test_row_0/B:col10/1733891263797/Put/seqid=0 2024-12-11T04:27:45,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742184_1360 (size=12301) 2024-12-11T04:27:45,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-12-11T04:27:45,540 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:45,543 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:45,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891325539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:45,543 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:45,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891325539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:45,543 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:45,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891325540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:45,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891325538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:45,545 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:45,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891325541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:45,687 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/5f35c362476d4b08ada49ded527ca3d6 2024-12-11T04:27:45,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/088e3abec32b41cfb8851e2630a9693e is 50, key is test_row_0/C:col10/1733891263797/Put/seqid=0 2024-12-11T04:27:45,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742185_1361 (size=12301) 2024-12-11T04:27:45,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-12-11T04:27:46,047 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:46,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891326045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:46,047 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:46,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891326045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:46,047 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:46,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891326046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:46,050 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:46,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891326048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:46,050 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:46,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891326049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:46,101 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/088e3abec32b41cfb8851e2630a9693e 2024-12-11T04:27:46,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/8278c92fbd75461eaf0cdeab6f8cdf62 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/8278c92fbd75461eaf0cdeab6f8cdf62 2024-12-11T04:27:46,111 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/8278c92fbd75461eaf0cdeab6f8cdf62, entries=150, sequenceid=411, filesize=12.0 K 2024-12-11T04:27:46,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/5f35c362476d4b08ada49ded527ca3d6 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/5f35c362476d4b08ada49ded527ca3d6 2024-12-11T04:27:46,116 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/5f35c362476d4b08ada49ded527ca3d6, entries=150, sequenceid=411, filesize=12.0 K 2024-12-11T04:27:46,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/088e3abec32b41cfb8851e2630a9693e as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/088e3abec32b41cfb8851e2630a9693e 2024-12-11T04:27:46,121 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/088e3abec32b41cfb8851e2630a9693e, entries=150, sequenceid=411, filesize=12.0 K 2024-12-11T04:27:46,122 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=73.80 KB/75570 for d07fdbcaf0fe943be9e071492694f078 in 1274ms, sequenceid=411, compaction requested=true 2024-12-11T04:27:46,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.HRegion(2538): Flush status journal for d07fdbcaf0fe943be9e071492694f078: 2024-12-11T04:27:46,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:46,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=98}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=98 2024-12-11T04:27:46,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=98 2024-12-11T04:27:46,124 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=98, resume processing ppid=97 2024-12-11T04:27:46,124 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, ppid=97, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4280 sec 2024-12-11T04:27:46,126 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=97, table=TestAcidGuarantees in 1.4320 sec 2024-12-11T04:27:46,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-12-11T04:27:46,799 INFO [Thread-1267 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 97 completed 2024-12-11T04:27:46,800 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:27:46,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=99, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=99, table=TestAcidGuarantees 2024-12-11T04:27:46,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-11T04:27:46,802 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=99, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=99, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:27:46,802 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=99, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=99, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:27:46,802 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=100, ppid=99, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:27:46,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-11T04:27:46,954 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:46,955 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=100 2024-12-11T04:27:46,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:46,955 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.HRegion(2837): Flushing d07fdbcaf0fe943be9e071492694f078 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-11T04:27:46,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=A 2024-12-11T04:27:46,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:46,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=B 2024-12-11T04:27:46,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:46,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=C 2024-12-11T04:27:46,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:46,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=100}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/59ed8ece829d4a91acf2c2d4b2d65cf2 is 50, key is test_row_0/A:col10/1733891264920/Put/seqid=0 2024-12-11T04:27:46,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742186_1362 
(size=12301) 2024-12-11T04:27:47,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on d07fdbcaf0fe943be9e071492694f078 2024-12-11T04:27:47,055 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. as already flushing 2024-12-11T04:27:47,075 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:47,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891327070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:47,076 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:47,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891327071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:47,077 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:47,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891327071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:47,077 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:47,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891327071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:47,077 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:47,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891327072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:47,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-11T04:27:47,177 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:47,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891327176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:47,179 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:47,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891327177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:47,180 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:47,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891327178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:47,180 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:47,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891327178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:47,180 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:47,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891327178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:47,366 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=425 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/59ed8ece829d4a91acf2c2d4b2d65cf2 2024-12-11T04:27:47,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=100}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/df025d2bac1946ab80050273cc0b1c7c is 50, key is test_row_0/B:col10/1733891264920/Put/seqid=0 2024-12-11T04:27:47,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742187_1363 (size=12301) 2024-12-11T04:27:47,380 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:47,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891327379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:47,382 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:47,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891327381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:47,383 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:47,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891327381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:47,384 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:47,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891327382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:47,384 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:47,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891327382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:47,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-11T04:27:47,683 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:47,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891327682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:47,684 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:47,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891327683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:47,688 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:47,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891327685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:47,688 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:47,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891327686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:47,688 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:47,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891327686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:47,779 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=425 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/df025d2bac1946ab80050273cc0b1c7c 2024-12-11T04:27:47,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=100}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/9d1e1ab9967c48a89754867c72ddd176 is 50, key is test_row_0/C:col10/1733891264920/Put/seqid=0 2024-12-11T04:27:47,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742188_1364 (size=12301) 2024-12-11T04:27:47,794 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=425 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/9d1e1ab9967c48a89754867c72ddd176 2024-12-11T04:27:47,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/59ed8ece829d4a91acf2c2d4b2d65cf2 as 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/59ed8ece829d4a91acf2c2d4b2d65cf2 2024-12-11T04:27:47,811 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/59ed8ece829d4a91acf2c2d4b2d65cf2, entries=150, sequenceid=425, filesize=12.0 K 2024-12-11T04:27:47,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/df025d2bac1946ab80050273cc0b1c7c as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/df025d2bac1946ab80050273cc0b1c7c 2024-12-11T04:27:47,815 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/df025d2bac1946ab80050273cc0b1c7c, entries=150, sequenceid=425, filesize=12.0 K 2024-12-11T04:27:47,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/9d1e1ab9967c48a89754867c72ddd176 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/9d1e1ab9967c48a89754867c72ddd176 2024-12-11T04:27:47,820 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/9d1e1ab9967c48a89754867c72ddd176, entries=150, sequenceid=425, filesize=12.0 K 2024-12-11T04:27:47,821 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for d07fdbcaf0fe943be9e071492694f078 in 865ms, sequenceid=425, compaction requested=true 2024-12-11T04:27:47,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.HRegion(2538): Flush status journal for d07fdbcaf0fe943be9e071492694f078: 2024-12-11T04:27:47,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 
2024-12-11T04:27:47,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=100}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=100 2024-12-11T04:27:47,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=100 2024-12-11T04:27:47,826 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=100, resume processing ppid=99 2024-12-11T04:27:47,826 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, ppid=99, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0220 sec 2024-12-11T04:27:47,827 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=99, table=TestAcidGuarantees in 1.0260 sec 2024-12-11T04:27:47,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-11T04:27:47,904 INFO [Thread-1267 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 99 completed 2024-12-11T04:27:47,905 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:27:47,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=101, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=101, table=TestAcidGuarantees 2024-12-11T04:27:47,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=101 2024-12-11T04:27:47,907 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=101, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=101, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:27:47,907 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=101, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=101, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:27:47,907 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:27:48,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=101 2024-12-11T04:27:48,059 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:48,060 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=102 2024-12-11T04:27:48,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=102}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 
2024-12-11T04:27:48,060 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=102}] regionserver.HRegion(2837): Flushing d07fdbcaf0fe943be9e071492694f078 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-11T04:27:48,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=102}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=A 2024-12-11T04:27:48,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=102}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:48,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=102}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=B 2024-12-11T04:27:48,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=102}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:48,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=102}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=C 2024-12-11T04:27:48,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=102}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:48,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=102}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/de775693e2614ab8998505410cf4917e is 50, key is test_row_0/A:col10/1733891267070/Put/seqid=0 2024-12-11T04:27:48,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742189_1365 (size=12301) 2024-12-11T04:27:48,070 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=102}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=447 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/de775693e2614ab8998505410cf4917e 2024-12-11T04:27:48,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=102}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/8f466677bef34be98e283c65f4a4aa1e is 50, key is test_row_0/B:col10/1733891267070/Put/seqid=0 2024-12-11T04:27:48,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742190_1366 (size=12301) 2024-12-11T04:27:48,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on d07fdbcaf0fe943be9e071492694f078 2024-12-11T04:27:48,187 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 
as already flushing 2024-12-11T04:27:48,201 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:48,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891328196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:48,201 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:48,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891328197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:48,202 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:48,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891328197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:48,202 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:48,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891328198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:48,203 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:48,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891328199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:48,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=101 2024-12-11T04:27:48,304 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:48,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891328302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:48,305 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:48,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891328302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:48,305 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:48,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891328303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:48,305 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:48,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891328303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:48,306 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:48,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891328304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:48,483 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=102}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=447 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/8f466677bef34be98e283c65f4a4aa1e 2024-12-11T04:27:48,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=102}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/f94201785c944663bd2e79f4b726f684 is 50, key is test_row_0/C:col10/1733891267070/Put/seqid=0 2024-12-11T04:27:48,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742191_1367 (size=12301) 2024-12-11T04:27:48,493 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=102}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=447 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/f94201785c944663bd2e79f4b726f684 2024-12-11T04:27:48,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=102}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/de775693e2614ab8998505410cf4917e as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/de775693e2614ab8998505410cf4917e 2024-12-11T04:27:48,500 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=102}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/de775693e2614ab8998505410cf4917e, entries=150, sequenceid=447, filesize=12.0 K 2024-12-11T04:27:48,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=102}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/8f466677bef34be98e283c65f4a4aa1e as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/8f466677bef34be98e283c65f4a4aa1e 2024-12-11T04:27:48,508 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=102}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/8f466677bef34be98e283c65f4a4aa1e, entries=150, sequenceid=447, filesize=12.0 K 2024-12-11T04:27:48,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=101 2024-12-11T04:27:48,509 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:48,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53952 deadline: 1733891328506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:48,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=102}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/f94201785c944663bd2e79f4b726f684 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/f94201785c944663bd2e79f4b726f684 2024-12-11T04:27:48,510 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:48,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53912 deadline: 1733891328507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:48,510 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:48,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53936 deadline: 1733891328507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:48,510 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:48,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53898 deadline: 1733891328507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:48,510 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:48,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53926 deadline: 1733891328507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:48,513 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=102}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/f94201785c944663bd2e79f4b726f684, entries=150, sequenceid=447, filesize=12.0 K 2024-12-11T04:27:48,514 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=102}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for d07fdbcaf0fe943be9e071492694f078 in 453ms, sequenceid=447, compaction requested=true 2024-12-11T04:27:48,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=102}] regionserver.HRegion(2538): Flush status journal for d07fdbcaf0fe943be9e071492694f078: 2024-12-11T04:27:48,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=102}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 
2024-12-11T04:27:48,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=102}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=102 2024-12-11T04:27:48,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=102 2024-12-11T04:27:48,517 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=102, resume processing ppid=101 2024-12-11T04:27:48,517 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, ppid=101, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 608 msec 2024-12-11T04:27:48,518 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=101, table=TestAcidGuarantees in 612 msec 2024-12-11T04:27:48,617 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-11T04:27:48,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on d07fdbcaf0fe943be9e071492694f078 2024-12-11T04:27:48,815 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing d07fdbcaf0fe943be9e071492694f078 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-11T04:27:48,815 DEBUG [Thread-1257 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x34cb3991 to 127.0.0.1:50078 2024-12-11T04:27:48,815 DEBUG [Thread-1257 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:27:48,816 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=A 2024-12-11T04:27:48,816 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:48,816 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=B 2024-12-11T04:27:48,816 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:48,816 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=C 2024-12-11T04:27:48,816 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:48,816 DEBUG [Thread-1259 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2e9ae050 to 127.0.0.1:50078 2024-12-11T04:27:48,816 DEBUG [Thread-1259 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:27:48,818 DEBUG [Thread-1265 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6a0e9c8f to 127.0.0.1:50078 2024-12-11T04:27:48,818 DEBUG [Thread-1265 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:27:48,818 DEBUG [Thread-1276 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6f5b2180 to 127.0.0.1:50078 2024-12-11T04:27:48,818 DEBUG [Thread-1276 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:27:48,820 DEBUG [Thread-1268 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c299cfb to 127.0.0.1:50078 2024-12-11T04:27:48,821 DEBUG [Thread-1268 {}] ipc.AbstractRpcClient(514): Stopping rpc 
client 2024-12-11T04:27:48,821 DEBUG [Thread-1272 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3677bd4f to 127.0.0.1:50078 2024-12-11T04:27:48,821 DEBUG [Thread-1272 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:27:48,822 DEBUG [Thread-1270 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x605827c9 to 127.0.0.1:50078 2024-12-11T04:27:48,822 DEBUG [Thread-1270 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:27:48,823 DEBUG [Thread-1261 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2fef31f8 to 127.0.0.1:50078 2024-12-11T04:27:48,823 DEBUG [Thread-1263 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0eb04aeb to 127.0.0.1:50078 2024-12-11T04:27:48,823 DEBUG [Thread-1263 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:27:48,823 DEBUG [Thread-1261 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:27:48,824 DEBUG [Thread-1274 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x521aad6f to 127.0.0.1:50078 2024-12-11T04:27:48,824 DEBUG [Thread-1274 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:27:48,825 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/3ac280ae503b453ba2fdfb1576984891 is 50, key is test_row_0/A:col10/1733891268814/Put/seqid=0 2024-12-11T04:27:48,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742192_1368 (size=17181) 2024-12-11T04:27:49,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=101 2024-12-11T04:27:49,010 INFO [Thread-1267 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 101 completed 2024-12-11T04:27:49,010 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
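The "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 101 completed" line above is the client-side completion of the FlushTableProcedure / FlushRegionProcedure pair finished a few lines earlier. A rough, hedged sketch of requesting the same kind of flush through the Admin API (not necessarily how AcidGuaranteesTestTool itself does it):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; in the log this surfaces
      // as a FlushTableProcedure with a child FlushRegionProcedure per region.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}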
Writers: 2024-12-11T04:27:49,010 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 67 2024-12-11T04:27:49,010 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 61 2024-12-11T04:27:49,010 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 116 2024-12-11T04:27:49,010 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 64 2024-12-11T04:27:49,010 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 66 2024-12-11T04:27:49,010 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-11T04:27:49,010 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6398 2024-12-11T04:27:49,010 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6508 2024-12-11T04:27:49,010 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6233 2024-12-11T04:27:49,010 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6381 2024-12-11T04:27:49,010 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6478 2024-12-11T04:27:49,010 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-11T04:27:49,010 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-11T04:27:49,010 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7c480dfb to 127.0.0.1:50078 2024-12-11T04:27:49,011 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:27:49,011 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-11T04:27:49,012 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-11T04:27:49,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=103, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-11T04:27:49,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=103 2024-12-11T04:27:49,014 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733891269014"}]},"ts":"1733891269014"} 2024-12-11T04:27:49,015 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-11T04:27:49,017 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-11T04:27:49,017 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=104, ppid=103, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-11T04:27:49,018 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d07fdbcaf0fe943be9e071492694f078, UNASSIGN}] 2024-12-11T04:27:49,019 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=105, ppid=104, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=d07fdbcaf0fe943be9e071492694f078, UNASSIGN 2024-12-11T04:27:49,019 INFO [PEWorker-1 
{}] assignment.RegionStateStore(202): pid=105 updating hbase:meta row=d07fdbcaf0fe943be9e071492694f078, regionState=CLOSING, regionLocation=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:49,020 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T04:27:49,020 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=106, ppid=105, state=RUNNABLE; CloseRegionProcedure d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267}] 2024-12-11T04:27:49,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=103 2024-12-11T04:27:49,171 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:49,172 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] handler.UnassignRegionHandler(124): Close d07fdbcaf0fe943be9e071492694f078 2024-12-11T04:27:49,172 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-11T04:27:49,172 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.HRegion(1681): Closing d07fdbcaf0fe943be9e071492694f078, disabling compactions & flushes 2024-12-11T04:27:49,172 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.HRegion(1942): waiting for 0 compactions & cache flush to complete for region TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 
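From "Started disable of TestAcidGuarantees" onward, the master runs a DisableTableProcedure (pid=103) whose subprocedures close the table's regions before hbase:meta is updated to DISABLED. Below is a minimal sketch, assuming the table name from this log and an open client Connection, of driving the same sequence from application code; the optional deleteTable call is an illustrative assumption, not something this test performs at this point.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      if (!admin.isTableDisabled(table)) {
        // Triggers the DisableTableProcedure: regions are unassigned via
        // CloseTableRegionsProcedure / TransitRegionStateProcedure, then the
        // table state flips to DISABLED in hbase:meta.
        admin.disableTable(table);
      }
      // admin.deleteTable(table);  // optional cleanup once the table is disabled
    }
  }
}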
2024-12-11T04:27:49,229 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=465 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/3ac280ae503b453ba2fdfb1576984891 2024-12-11T04:27:49,235 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/2aecf12c6e434308bf94baeb704b077b is 50, key is test_row_0/B:col10/1733891268814/Put/seqid=0 2024-12-11T04:27:49,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742193_1369 (size=12301) 2024-12-11T04:27:49,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=103 2024-12-11T04:27:49,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=103 2024-12-11T04:27:49,639 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=465 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/2aecf12c6e434308bf94baeb704b077b 2024-12-11T04:27:49,645 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/1f37ccc094e74a1bb1735003486a3313 is 50, key is test_row_0/C:col10/1733891268814/Put/seqid=0 2024-12-11T04:27:49,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742194_1370 (size=12301) 2024-12-11T04:27:50,049 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=465 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/1f37ccc094e74a1bb1735003486a3313 2024-12-11T04:27:50,053 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/3ac280ae503b453ba2fdfb1576984891 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/3ac280ae503b453ba2fdfb1576984891 2024-12-11T04:27:50,056 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/3ac280ae503b453ba2fdfb1576984891, entries=250, sequenceid=465, filesize=16.8 K 2024-12-11T04:27:50,056 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/2aecf12c6e434308bf94baeb704b077b as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/2aecf12c6e434308bf94baeb704b077b 2024-12-11T04:27:50,059 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/2aecf12c6e434308bf94baeb704b077b, entries=150, sequenceid=465, filesize=12.0 K 2024-12-11T04:27:50,060 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/1f37ccc094e74a1bb1735003486a3313 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/1f37ccc094e74a1bb1735003486a3313 2024-12-11T04:27:50,063 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/1f37ccc094e74a1bb1735003486a3313, entries=150, sequenceid=465, filesize=12.0 K 2024-12-11T04:27:50,063 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=20.13 KB/20610 for d07fdbcaf0fe943be9e071492694f078 in 1248ms, sequenceid=465, compaction requested=true 2024-12-11T04:27:50,063 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for d07fdbcaf0fe943be9e071492694f078: 2024-12-11T04:27:50,063 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:50,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d07fdbcaf0fe943be9e071492694f078:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:27:50,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:50,063 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:50,063 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. because compaction request was cancelled 2024-12-11T04:27:50,064 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d07fdbcaf0fe943be9e071492694f078:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:27:50,064 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 
after waiting 0 ms 2024-12-11T04:27:50,064 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d07fdbcaf0fe943be9e071492694f078:A 2024-12-11T04:27:50,064 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 2024-12-11T04:27:50,064 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:50,064 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. because compaction request was cancelled 2024-12-11T04:27:50,064 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d07fdbcaf0fe943be9e071492694f078:B 2024-12-11T04:27:50,064 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. because compaction request was cancelled 2024-12-11T04:27:50,064 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store d07fdbcaf0fe943be9e071492694f078:C, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:27:50,064 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: d07fdbcaf0fe943be9e071492694f078:C 2024-12-11T04:27:50,064 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:50,064 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.HRegion(2837): Flushing d07fdbcaf0fe943be9e071492694f078 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-12-11T04:27:50,064 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=A 2024-12-11T04:27:50,064 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:50,064 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=B 2024-12-11T04:27:50,064 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:50,064 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.CompactingMemStore(205): FLUSHING TO DISK d07fdbcaf0fe943be9e071492694f078, store=C 2024-12-11T04:27:50,064 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:50,067 DEBUG 
[RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/bb8421343fc3403cb7d139b4e6569469 is 50, key is test_row_0/A:col10/1733891268820/Put/seqid=0 2024-12-11T04:27:50,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742195_1371 (size=9857) 2024-12-11T04:27:50,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=103 2024-12-11T04:27:50,471 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=471 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/bb8421343fc3403cb7d139b4e6569469 2024-12-11T04:27:50,477 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/389a37031cb84ad5a504ce43e1092e81 is 50, key is test_row_0/B:col10/1733891268820/Put/seqid=0 2024-12-11T04:27:50,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742196_1372 (size=9857) 2024-12-11T04:27:50,881 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=471 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/389a37031cb84ad5a504ce43e1092e81 2024-12-11T04:27:50,886 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/00f17dffe9b7481bb94ef0ce70538bda is 50, key is test_row_0/C:col10/1733891268820/Put/seqid=0 2024-12-11T04:27:50,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742197_1373 (size=9857) 2024-12-11T04:27:51,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=103 2024-12-11T04:27:51,290 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=471 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/00f17dffe9b7481bb94ef0ce70538bda 2024-12-11T04:27:51,294 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/A/bb8421343fc3403cb7d139b4e6569469 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/bb8421343fc3403cb7d139b4e6569469 2024-12-11T04:27:51,297 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/bb8421343fc3403cb7d139b4e6569469, entries=100, sequenceid=471, filesize=9.6 K 2024-12-11T04:27:51,298 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/B/389a37031cb84ad5a504ce43e1092e81 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/389a37031cb84ad5a504ce43e1092e81 2024-12-11T04:27:51,301 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/389a37031cb84ad5a504ce43e1092e81, entries=100, sequenceid=471, filesize=9.6 K 2024-12-11T04:27:51,302 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/.tmp/C/00f17dffe9b7481bb94ef0ce70538bda as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/00f17dffe9b7481bb94ef0ce70538bda 2024-12-11T04:27:51,304 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/00f17dffe9b7481bb94ef0ce70538bda, entries=100, sequenceid=471, filesize=9.6 K 2024-12-11T04:27:51,305 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=0 B/0 for d07fdbcaf0fe943be9e071492694f078 in 1241ms, sequenceid=471, compaction requested=true 2024-12-11T04:27:51,306 DEBUG [StoreCloser-TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/d751ee1a0f2b4d07bf1758e334756765, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/b7c85666f7744a6b9aa2af31f984ad92, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/de59b864c537416bb52af9983fdbc6bf, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/f529e8ddda754fe886000789e59e4715, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/c7f66a38e0d547bda7e192b29e5523ed, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/b1506d26d2c54e19bc9a36326fa783d3, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/a640cf02818847c19c96027244bedf33, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/34c776df92cd4685b7dfcfdff73f9dc3, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/ee6ea9b077ff40f09a97178d09e3691c, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/e5157749a5114b7f910279a352e85399, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/08a3efa925e6488da87da812a534c3e0, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/b3d391122bf64c25acc4e1fe70bee2f5, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/0e3487774c7c40e3a7741897e81aee0b, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/4f6320768b7842cd9b3c17cad28ef17e, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/9c60abba401a416ea48a3b05ad7a8fa6, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/e1978185f0ab484da787afcf3eac8d02, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/697ec88bdffe4612b996b6c8510c4e3c, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/51a32bc5fc4f445ba2b4b35c0de8155a, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/e655e20da2154b9c912f52f973629e0b, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/2132328d46754e02a9a6104c872135f5, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/8b8b45d4489a456ab0626013b14625ac, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/c7ff829234fc4b2a9094df3b037ecd50] to archive 2024-12-11T04:27:51,306 DEBUG [StoreCloser-TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-11T04:27:51,309 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/d751ee1a0f2b4d07bf1758e334756765 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/d751ee1a0f2b4d07bf1758e334756765 2024-12-11T04:27:51,309 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/b7c85666f7744a6b9aa2af31f984ad92 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/b7c85666f7744a6b9aa2af31f984ad92 2024-12-11T04:27:51,309 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/de59b864c537416bb52af9983fdbc6bf to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/de59b864c537416bb52af9983fdbc6bf 2024-12-11T04:27:51,309 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/f529e8ddda754fe886000789e59e4715 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/f529e8ddda754fe886000789e59e4715 2024-12-11T04:27:51,309 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/a640cf02818847c19c96027244bedf33 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/a640cf02818847c19c96027244bedf33 2024-12-11T04:27:51,310 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/c7f66a38e0d547bda7e192b29e5523ed to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/c7f66a38e0d547bda7e192b29e5523ed 2024-12-11T04:27:51,310 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/b1506d26d2c54e19bc9a36326fa783d3 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/b1506d26d2c54e19bc9a36326fa783d3 2024-12-11T04:27:51,310 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/34c776df92cd4685b7dfcfdff73f9dc3 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/34c776df92cd4685b7dfcfdff73f9dc3 2024-12-11T04:27:51,311 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/ee6ea9b077ff40f09a97178d09e3691c to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/ee6ea9b077ff40f09a97178d09e3691c 2024-12-11T04:27:51,311 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/08a3efa925e6488da87da812a534c3e0 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/08a3efa925e6488da87da812a534c3e0 2024-12-11T04:27:51,312 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/b3d391122bf64c25acc4e1fe70bee2f5 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/b3d391122bf64c25acc4e1fe70bee2f5 2024-12-11T04:27:51,312 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/0e3487774c7c40e3a7741897e81aee0b to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/0e3487774c7c40e3a7741897e81aee0b 2024-12-11T04:27:51,312 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/e5157749a5114b7f910279a352e85399 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/e5157749a5114b7f910279a352e85399 2024-12-11T04:27:51,312 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/4f6320768b7842cd9b3c17cad28ef17e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/4f6320768b7842cd9b3c17cad28ef17e 2024-12-11T04:27:51,312 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/9c60abba401a416ea48a3b05ad7a8fa6 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/9c60abba401a416ea48a3b05ad7a8fa6 2024-12-11T04:27:51,312 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/e1978185f0ab484da787afcf3eac8d02 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/e1978185f0ab484da787afcf3eac8d02 2024-12-11T04:27:51,313 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/697ec88bdffe4612b996b6c8510c4e3c to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/697ec88bdffe4612b996b6c8510c4e3c 2024-12-11T04:27:51,313 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/51a32bc5fc4f445ba2b4b35c0de8155a to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/51a32bc5fc4f445ba2b4b35c0de8155a 2024-12-11T04:27:51,313 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/e655e20da2154b9c912f52f973629e0b to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/e655e20da2154b9c912f52f973629e0b 2024-12-11T04:27:51,313 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/2132328d46754e02a9a6104c872135f5 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/2132328d46754e02a9a6104c872135f5 2024-12-11T04:27:51,314 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/8b8b45d4489a456ab0626013b14625ac to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/8b8b45d4489a456ab0626013b14625ac 2024-12-11T04:27:51,314 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/c7ff829234fc4b2a9094df3b037ecd50 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/c7ff829234fc4b2a9094df3b037ecd50 2024-12-11T04:27:51,315 DEBUG [StoreCloser-TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/9ffae03e8f0b439a98e6e278fa53714f, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/ce274a16e02249c099d726a6b2b727ed, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/eef115c015274204acce84bc26e5a3ed, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/bced0a4717394a3887902beb61e12a2b, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/76f3a91de3ff4155bd728e34a8602195, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/d959bf47e83b4102ae3a844e9ca38ae9, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/bcd2c286a6864a5b8e8240723137196a, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/1addb432485f4d389e150941f985843c, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/372bf382856d41aea31231483409e8ad, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/ddb3ae1d69b84682a8946ac0a88d3dd5, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/71ac100b8f244b28a34a4b82f3ecdc35, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/084b5256b72443a2a2d9c1f2d9d84c1c, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/87faf121ea824e95a4b5b15f9e07b64a, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/dce78cda5f7b423986d53f4679d02ca1, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/afe96d23b2bc42dc9704570b8bacd872, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/e48167d3b37b42efb671c09b19298bf0, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/e9535959d15b45adab1218d657b1f382, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/26af753268f24d3c9b0b4e78efe5a881, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/d5329b325b5344ae9f04c9cfafe3a998, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/d8e9c0c695fd4d948dbe3544daa2747e, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/ec8890b724c94cb888bbdf0a4aff8343, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/981d584e30e140199fda5bb99b623085] to archive 2024-12-11T04:27:51,316 DEBUG [StoreCloser-TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
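The StoreCloser / HFileArchiver lines above and below show compacted store files being moved from data/default/TestAcidGuarantees/<region>/<family> into the parallel archive/ tree rather than deleted outright. A small, hypothetical utility for inspecting such an archive directory with the Hadoop FileSystem API; the command-line path argument is an assumption and would be the archive column-family directory seen in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListArchivedStoreFiles {
  public static void main(String[] args) throws Exception {
    // e.g. hdfs://localhost:43317/.../archive/data/default/TestAcidGuarantees/<region>/A
    Path archiveDir = new Path(args[0]);
    Configuration conf = new Configuration();
    FileSystem fs = archiveDir.getFileSystem(conf);
    for (FileStatus status : fs.listStatus(archiveDir)) {
      // Print size and file name of each archived HFile.
      System.out.println(status.getLen() + "\t" + status.getPath().getName());
    }
  }
}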
2024-12-11T04:27:51,318 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/ce274a16e02249c099d726a6b2b727ed to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/ce274a16e02249c099d726a6b2b727ed 2024-12-11T04:27:51,318 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/9ffae03e8f0b439a98e6e278fa53714f to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/9ffae03e8f0b439a98e6e278fa53714f 2024-12-11T04:27:51,318 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/bced0a4717394a3887902beb61e12a2b to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/bced0a4717394a3887902beb61e12a2b 2024-12-11T04:27:51,318 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/76f3a91de3ff4155bd728e34a8602195 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/76f3a91de3ff4155bd728e34a8602195 2024-12-11T04:27:51,318 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/1addb432485f4d389e150941f985843c to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/1addb432485f4d389e150941f985843c 2024-12-11T04:27:51,318 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/eef115c015274204acce84bc26e5a3ed to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/eef115c015274204acce84bc26e5a3ed 2024-12-11T04:27:51,318 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/d959bf47e83b4102ae3a844e9ca38ae9 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/d959bf47e83b4102ae3a844e9ca38ae9 2024-12-11T04:27:51,318 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/bcd2c286a6864a5b8e8240723137196a to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/bcd2c286a6864a5b8e8240723137196a 2024-12-11T04:27:51,319 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/084b5256b72443a2a2d9c1f2d9d84c1c to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/084b5256b72443a2a2d9c1f2d9d84c1c 2024-12-11T04:27:51,320 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/87faf121ea824e95a4b5b15f9e07b64a to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/87faf121ea824e95a4b5b15f9e07b64a 2024-12-11T04:27:51,320 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/ddb3ae1d69b84682a8946ac0a88d3dd5 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/ddb3ae1d69b84682a8946ac0a88d3dd5 2024-12-11T04:27:51,320 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/dce78cda5f7b423986d53f4679d02ca1 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/dce78cda5f7b423986d53f4679d02ca1 2024-12-11T04:27:51,320 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/afe96d23b2bc42dc9704570b8bacd872 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/afe96d23b2bc42dc9704570b8bacd872 2024-12-11T04:27:51,320 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/71ac100b8f244b28a34a4b82f3ecdc35 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/71ac100b8f244b28a34a4b82f3ecdc35 2024-12-11T04:27:51,321 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/372bf382856d41aea31231483409e8ad to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/372bf382856d41aea31231483409e8ad 2024-12-11T04:27:51,321 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/e48167d3b37b42efb671c09b19298bf0 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/e48167d3b37b42efb671c09b19298bf0 2024-12-11T04:27:51,322 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/e9535959d15b45adab1218d657b1f382 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/e9535959d15b45adab1218d657b1f382 2024-12-11T04:27:51,322 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/d8e9c0c695fd4d948dbe3544daa2747e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/d8e9c0c695fd4d948dbe3544daa2747e 2024-12-11T04:27:51,322 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/d5329b325b5344ae9f04c9cfafe3a998 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/d5329b325b5344ae9f04c9cfafe3a998 2024-12-11T04:27:51,322 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/981d584e30e140199fda5bb99b623085 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/981d584e30e140199fda5bb99b623085 2024-12-11T04:27:51,322 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/26af753268f24d3c9b0b4e78efe5a881 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/26af753268f24d3c9b0b4e78efe5a881 2024-12-11T04:27:51,322 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/ec8890b724c94cb888bbdf0a4aff8343 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/ec8890b724c94cb888bbdf0a4aff8343 2024-12-11T04:27:51,323 DEBUG [StoreCloser-TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/6556c5529e564dd18437b0fdcde7b7b4, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/ef73f29393544c2bb3e1e24fec352215, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/c12b282c791c4b9c891c7996720c29ab, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/812cdc95a4eb4cfc8eb3e31841cab666, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/7d4f92a8c8bb474c910a25d0d09bf3bb, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/0dbfb537a12a410b9c0c8d9d65be2943, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/efc5baf81a2f4efcbab255620642d00d, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/6427c44225314cb28936c7843a43777e, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/6c858b37a6f14972947c8c24483048c4, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/178b4a9a4c40444f89ca16fc6ff8a9f4, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/cec9634a396547ea8d190767f21df470, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/a02f87e2a5cc41168b714ab210e708ff, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/83e719f28f8941a88166888a4506b781, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/ea201e10d857403f898d8937d34d9f1c, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/47a05691df2e4d3dbc8287c7ba987142, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/87ea0f1bda2146bf8f5c07915ef77d68, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/a9c8821645e24ea38f18bb40e67a2f0b, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/ed74b69b03d94582a8504fd38cfd8d5e, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/dd90aa33a98d4e9aa3e4c06d53b95cf6, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/b466467e180149ea9da2b7fde4f13b72, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/242adfe0802d4f05a745dc514e5f93ee, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/a0943931a1e04647a6418939ba222f9e] to archive 2024-12-11T04:27:51,324 DEBUG [StoreCloser-TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-11T04:27:51,326 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/6556c5529e564dd18437b0fdcde7b7b4 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/6556c5529e564dd18437b0fdcde7b7b4 2024-12-11T04:27:51,326 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/7d4f92a8c8bb474c910a25d0d09bf3bb to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/7d4f92a8c8bb474c910a25d0d09bf3bb 2024-12-11T04:27:51,326 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/ef73f29393544c2bb3e1e24fec352215 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/ef73f29393544c2bb3e1e24fec352215 2024-12-11T04:27:51,327 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/812cdc95a4eb4cfc8eb3e31841cab666 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/812cdc95a4eb4cfc8eb3e31841cab666 2024-12-11T04:27:51,327 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/0dbfb537a12a410b9c0c8d9d65be2943 to 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/0dbfb537a12a410b9c0c8d9d65be2943 2024-12-11T04:27:51,327 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/c12b282c791c4b9c891c7996720c29ab to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/c12b282c791c4b9c891c7996720c29ab 2024-12-11T04:27:51,327 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/6427c44225314cb28936c7843a43777e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/6427c44225314cb28936c7843a43777e 2024-12-11T04:27:51,327 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/efc5baf81a2f4efcbab255620642d00d to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/efc5baf81a2f4efcbab255620642d00d 2024-12-11T04:27:51,328 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/6c858b37a6f14972947c8c24483048c4 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/6c858b37a6f14972947c8c24483048c4 2024-12-11T04:27:51,328 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/cec9634a396547ea8d190767f21df470 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/cec9634a396547ea8d190767f21df470 2024-12-11T04:27:51,328 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/83e719f28f8941a88166888a4506b781 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/83e719f28f8941a88166888a4506b781 2024-12-11T04:27:51,328 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/a02f87e2a5cc41168b714ab210e708ff to 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/a02f87e2a5cc41168b714ab210e708ff 2024-12-11T04:27:51,329 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/87ea0f1bda2146bf8f5c07915ef77d68 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/87ea0f1bda2146bf8f5c07915ef77d68 2024-12-11T04:27:51,329 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/178b4a9a4c40444f89ca16fc6ff8a9f4 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/178b4a9a4c40444f89ca16fc6ff8a9f4 2024-12-11T04:27:51,329 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/47a05691df2e4d3dbc8287c7ba987142 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/47a05691df2e4d3dbc8287c7ba987142 2024-12-11T04:27:51,329 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/ea201e10d857403f898d8937d34d9f1c to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/ea201e10d857403f898d8937d34d9f1c 2024-12-11T04:27:51,330 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/a9c8821645e24ea38f18bb40e67a2f0b to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/a9c8821645e24ea38f18bb40e67a2f0b 2024-12-11T04:27:51,330 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/b466467e180149ea9da2b7fde4f13b72 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/b466467e180149ea9da2b7fde4f13b72 2024-12-11T04:27:51,330 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/ed74b69b03d94582a8504fd38cfd8d5e to 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/ed74b69b03d94582a8504fd38cfd8d5e 2024-12-11T04:27:51,330 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/242adfe0802d4f05a745dc514e5f93ee to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/242adfe0802d4f05a745dc514e5f93ee 2024-12-11T04:27:51,331 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/dd90aa33a98d4e9aa3e4c06d53b95cf6 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/dd90aa33a98d4e9aa3e4c06d53b95cf6 2024-12-11T04:27:51,331 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/a0943931a1e04647a6418939ba222f9e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/a0943931a1e04647a6418939ba222f9e 2024-12-11T04:27:51,334 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/recovered.edits/474.seqid, newMaxSeqId=474, maxSeqId=1 2024-12-11T04:27:51,335 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078. 
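[Editorial note] The HFileArchiver entries above all follow one pattern: each store file is moved from the region's data directory to a mirrored location under the cluster's archive root, keeping the namespace/table/region/family layout unchanged. The sketch below is a hypothetical illustration of that path mapping in plain Java (it is not the actual HFileArchiver code; the root-directory and marker names are assumptions read off the paths in the log):

    // Illustrative only: mirrors how the log shows store files moving from
    // <root>/data/<ns>/<table>/<region>/<cf>/<file> to
    // <root>/archive/data/<ns>/<table>/<region>/<cf>/<file>.
    public final class ArchivePathSketch {

        /** Rewrites a store-file path under data/ to its archive/ counterpart. */
        static String toArchivePath(String rootDir, String storeFilePath) {
            String dataPrefix = rootDir + "/data/";
            if (!storeFilePath.startsWith(dataPrefix)) {
                throw new IllegalArgumentException("Not under the data directory: " + storeFilePath);
            }
            // Keep the namespace/table/region/family/file suffix unchanged.
            String relative = storeFilePath.substring(dataPrefix.length());
            return rootDir + "/archive/data/" + relative;
        }

        public static void main(String[] args) {
            String root = "hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5";
            String src = root + "/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/bcd2c286a6864a5b8e8240723137196a";
            // Prints the same archive destination seen in the HFileArchiver entries above.
            System.out.println(toArchivePath(root, src));
        }
    }
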
2024-12-11T04:27:51,335 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] regionserver.HRegion(1635): Region close journal for d07fdbcaf0fe943be9e071492694f078: 2024-12-11T04:27:51,336 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=106}] handler.UnassignRegionHandler(170): Closed d07fdbcaf0fe943be9e071492694f078 2024-12-11T04:27:51,336 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=105 updating hbase:meta row=d07fdbcaf0fe943be9e071492694f078, regionState=CLOSED 2024-12-11T04:27:51,338 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=106, resume processing ppid=105 2024-12-11T04:27:51,338 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, ppid=105, state=SUCCESS; CloseRegionProcedure d07fdbcaf0fe943be9e071492694f078, server=5f466b3719ec,39071,1733891180267 in 2.3170 sec 2024-12-11T04:27:51,339 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=104 2024-12-11T04:27:51,339 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=104, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=d07fdbcaf0fe943be9e071492694f078, UNASSIGN in 2.3200 sec 2024-12-11T04:27:51,340 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=104, resume processing ppid=103 2024-12-11T04:27:51,340 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, ppid=103, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 2.3220 sec 2024-12-11T04:27:51,341 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733891271341"}]},"ts":"1733891271341"} 2024-12-11T04:27:51,342 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-11T04:27:51,344 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-11T04:27:51,345 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.3320 sec 2024-12-11T04:27:53,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=103 2024-12-11T04:27:53,118 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 103 completed 2024-12-11T04:27:53,118 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-11T04:27:53,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=107, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T04:27:53,120 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=107, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T04:27:53,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=107 2024-12-11T04:27:53,120 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=107, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T04:27:53,121 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078 2024-12-11T04:27:53,123 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A, FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B, FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C, FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/recovered.edits] 2024-12-11T04:27:53,128 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/241909a43936433caf35344927c44e61 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/241909a43936433caf35344927c44e61 2024-12-11T04:27:53,128 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/bac846e2a8c54c0d982043a5c318ea5b to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/bac846e2a8c54c0d982043a5c318ea5b 2024-12-11T04:27:53,128 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/59ed8ece829d4a91acf2c2d4b2d65cf2 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/59ed8ece829d4a91acf2c2d4b2d65cf2 2024-12-11T04:27:53,128 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/3ac280ae503b453ba2fdfb1576984891 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/3ac280ae503b453ba2fdfb1576984891 2024-12-11T04:27:53,129 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/8278c92fbd75461eaf0cdeab6f8cdf62 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/8278c92fbd75461eaf0cdeab6f8cdf62 
2024-12-11T04:27:53,129 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/bfa313d0bfd64da49b6d521c1dec3638 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/bfa313d0bfd64da49b6d521c1dec3638 2024-12-11T04:27:53,129 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/bb8421343fc3403cb7d139b4e6569469 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/bb8421343fc3403cb7d139b4e6569469 2024-12-11T04:27:53,129 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/d694b42d7e2b4a58ab53df1a34dbdc6d to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/d694b42d7e2b4a58ab53df1a34dbdc6d 2024-12-11T04:27:53,129 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/de775693e2614ab8998505410cf4917e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/de775693e2614ab8998505410cf4917e 2024-12-11T04:27:53,129 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/d3ed69d6c6ff431aa90249214d220a66 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/A/d3ed69d6c6ff431aa90249214d220a66 2024-12-11T04:27:53,134 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/2aecf12c6e434308bf94baeb704b077b to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/2aecf12c6e434308bf94baeb704b077b 2024-12-11T04:27:53,134 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/389a37031cb84ad5a504ce43e1092e81 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/389a37031cb84ad5a504ce43e1092e81 2024-12-11T04:27:53,135 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/87cb2adfdf2649bfb3a4468c0b753225 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/87cb2adfdf2649bfb3a4468c0b753225 2024-12-11T04:27:53,135 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/300053bf126c429590def4ed0e6ece67 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/300053bf126c429590def4ed0e6ece67 2024-12-11T04:27:53,135 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/558d5232a0404a3abb797e648b4a06d4 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/558d5232a0404a3abb797e648b4a06d4 2024-12-11T04:27:53,135 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/5f35c362476d4b08ada49ded527ca3d6 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/5f35c362476d4b08ada49ded527ca3d6 2024-12-11T04:27:53,135 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/5843c9531c224a8d914b7051adae5c52 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/5843c9531c224a8d914b7051adae5c52 2024-12-11T04:27:53,135 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/8f466677bef34be98e283c65f4a4aa1e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/8f466677bef34be98e283c65f4a4aa1e 2024-12-11T04:27:53,135 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/b5743d27b1364174a6520bc08cfebe78 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/b5743d27b1364174a6520bc08cfebe78 2024-12-11T04:27:53,135 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/df025d2bac1946ab80050273cc0b1c7c to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/B/df025d2bac1946ab80050273cc0b1c7c 2024-12-11T04:27:53,139 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/00f17dffe9b7481bb94ef0ce70538bda to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/00f17dffe9b7481bb94ef0ce70538bda 2024-12-11T04:27:53,140 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/1f37ccc094e74a1bb1735003486a3313 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/1f37ccc094e74a1bb1735003486a3313 2024-12-11T04:27:53,140 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/8b3bc77628e047a8b3caf76aacbf6f99 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/8b3bc77628e047a8b3caf76aacbf6f99 2024-12-11T04:27:53,140 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/195e5b99ea3646e0aaff4daaf8e48602 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/195e5b99ea3646e0aaff4daaf8e48602 2024-12-11T04:27:53,140 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/1b7d0fc636fd4bc8950af70c1b600207 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/1b7d0fc636fd4bc8950af70c1b600207 2024-12-11T04:27:53,140 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/9d1e1ab9967c48a89754867c72ddd176 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/9d1e1ab9967c48a89754867c72ddd176 2024-12-11T04:27:53,140 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/088e3abec32b41cfb8851e2630a9693e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/088e3abec32b41cfb8851e2630a9693e 2024-12-11T04:27:53,141 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/eb08487fd41747839ad5a0767207b8b8 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/eb08487fd41747839ad5a0767207b8b8 2024-12-11T04:27:53,141 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/f94201785c944663bd2e79f4b726f684 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/f94201785c944663bd2e79f4b726f684 2024-12-11T04:27:53,141 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/d76822c13107449b8bcab21bab74f298 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/C/d76822c13107449b8bcab21bab74f298 2024-12-11T04:27:53,143 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/recovered.edits/474.seqid to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078/recovered.edits/474.seqid 2024-12-11T04:27:53,144 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/d07fdbcaf0fe943be9e071492694f078 2024-12-11T04:27:53,144 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-11T04:27:53,146 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=107, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T04:27:53,149 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-11T04:27:53,151 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-11T04:27:53,152 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=107, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T04:27:53,152 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 
2024-12-11T04:27:53,152 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733891273152"}]},"ts":"9223372036854775807"} 2024-12-11T04:27:53,154 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-11T04:27:53,154 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => d07fdbcaf0fe943be9e071492694f078, NAME => 'TestAcidGuarantees,,1733891246647.d07fdbcaf0fe943be9e071492694f078.', STARTKEY => '', ENDKEY => ''}] 2024-12-11T04:27:53,154 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 2024-12-11T04:27:53,154 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733891273154"}]},"ts":"9223372036854775807"} 2024-12-11T04:27:53,155 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-11T04:27:53,157 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=107, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T04:27:53,158 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 39 msec 2024-12-11T04:27:53,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=107 2024-12-11T04:27:53,221 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 107 completed 2024-12-11T04:27:53,231 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=246 (was 244) - Thread LEAK? -, OpenFileDescriptor=453 (was 451) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=398 (was 353) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3546 (was 3600) 2024-12-11T04:27:53,239 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=246, OpenFileDescriptor=453, MaxFileDescriptor=1048576, SystemLoadAverage=398, ProcessCount=11, AvailableMemoryMB=3546 2024-12-11T04:27:53,240 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
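[Editorial note] The entries above record the client driving a DISABLE (procId 103) followed by a DELETE (procId 107) of TestAcidGuarantees, with the master running DisableTableProcedure and DeleteTableProcedure and archiving the region directory before removing the table from hbase:meta. A minimal sketch of the client-side calls that produce this sequence, assuming a default configuration on the classpath (this is not the test's actual code):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                TableName table = TableName.valueOf("TestAcidGuarantees");
                // A table must be disabled before it can be deleted; each call blocks
                // until the corresponding master procedure completes, matching the
                // "Operation: DISABLE ... completed" / "Operation: DELETE ... completed"
                // lines in the log.
                if (admin.tableExists(table)) {
                    admin.disableTable(table);
                    admin.deleteTable(table);
                }
            }
        }
    }
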
2024-12-11T04:27:53,241 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T04:27:53,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=108, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-11T04:27:53,243 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=108, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T04:27:53,243 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:53,243 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 108 2024-12-11T04:27:53,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-11T04:27:53,244 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=108, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T04:27:53,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742198_1374 (size=963) 2024-12-11T04:27:53,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-11T04:27:53,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-11T04:27:53,650 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5 2024-12-11T04:27:53,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742199_1375 (size=53) 2024-12-11T04:27:53,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-11T04:27:54,056 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T04:27:54,056 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 9dbedb23b89b4496aba995340251c393, disabling compactions & flushes 2024-12-11T04:27:54,056 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:27:54,056 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:27:54,056 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. after waiting 0 ms 2024-12-11T04:27:54,056 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:27:54,056 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 
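[Editorial note] The CREATE request above builds TestAcidGuarantees with three column families (A, B, C) and the table attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', which is why the StoreOpener entries further down open each store with a CompactingMemStore whose compactor is ADAPTIVE. The following is a hedged sketch of assembling such a descriptor with the HBase 2.x client API, an illustration rather than the TestAcidGuarantees test code (VERSIONS => '1' is carried over from the descriptor in the log):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAdaptiveTableSketch {
        static void createTable(Admin admin) throws Exception {
            TableDescriptorBuilder builder = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                // Table-level metadata seen in the log: ADAPTIVE in-memory compaction.
                .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
            for (String family : new String[] {"A", "B", "C"}) {
                builder.setColumnFamily(ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes(family))
                    .setMaxVersions(1)   // VERSIONS => '1' in the descriptor above
                    .build());
            }
            TableDescriptor descriptor = builder.build();
            admin.createTable(descriptor);   // drives CreateTableProcedure on the master
        }
    }

The 2.x client also exposes a per-family in-memory compaction setter on ColumnFamilyDescriptorBuilder; the table-level property is used here only because that is what appears in the logged descriptor.
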
2024-12-11T04:27:54,056 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 9dbedb23b89b4496aba995340251c393: 2024-12-11T04:27:54,057 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=108, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T04:27:54,057 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733891274057"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733891274057"}]},"ts":"1733891274057"} 2024-12-11T04:27:54,058 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-11T04:27:54,059 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=108, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T04:27:54,059 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733891274059"}]},"ts":"1733891274059"} 2024-12-11T04:27:54,060 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-11T04:27:54,064 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9dbedb23b89b4496aba995340251c393, ASSIGN}] 2024-12-11T04:27:54,065 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9dbedb23b89b4496aba995340251c393, ASSIGN 2024-12-11T04:27:54,065 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=9dbedb23b89b4496aba995340251c393, ASSIGN; state=OFFLINE, location=5f466b3719ec,39071,1733891180267; forceNewPlan=false, retain=false 2024-12-11T04:27:54,216 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=109 updating hbase:meta row=9dbedb23b89b4496aba995340251c393, regionState=OPENING, regionLocation=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:54,217 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=110, ppid=109, state=RUNNABLE; OpenRegionProcedure 9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267}] 2024-12-11T04:27:54,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-11T04:27:54,368 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:54,372 INFO [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=110}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 
2024-12-11T04:27:54,372 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=110}] regionserver.HRegion(7285): Opening region: {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} 2024-12-11T04:27:54,372 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=110}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 9dbedb23b89b4496aba995340251c393 2024-12-11T04:27:54,372 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=110}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T04:27:54,372 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=110}] regionserver.HRegion(7327): checking encryption for 9dbedb23b89b4496aba995340251c393 2024-12-11T04:27:54,372 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=110}] regionserver.HRegion(7330): checking classloading for 9dbedb23b89b4496aba995340251c393 2024-12-11T04:27:54,373 INFO [StoreOpener-9dbedb23b89b4496aba995340251c393-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 9dbedb23b89b4496aba995340251c393 2024-12-11T04:27:54,374 INFO [StoreOpener-9dbedb23b89b4496aba995340251c393-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T04:27:54,375 INFO [StoreOpener-9dbedb23b89b4496aba995340251c393-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9dbedb23b89b4496aba995340251c393 columnFamilyName A 2024-12-11T04:27:54,375 DEBUG [StoreOpener-9dbedb23b89b4496aba995340251c393-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:54,375 INFO [StoreOpener-9dbedb23b89b4496aba995340251c393-1 {}] regionserver.HStore(327): Store=9dbedb23b89b4496aba995340251c393/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T04:27:54,375 INFO [StoreOpener-9dbedb23b89b4496aba995340251c393-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 9dbedb23b89b4496aba995340251c393 2024-12-11T04:27:54,376 INFO [StoreOpener-9dbedb23b89b4496aba995340251c393-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T04:27:54,376 INFO [StoreOpener-9dbedb23b89b4496aba995340251c393-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9dbedb23b89b4496aba995340251c393 columnFamilyName B 2024-12-11T04:27:54,376 DEBUG [StoreOpener-9dbedb23b89b4496aba995340251c393-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:54,377 INFO [StoreOpener-9dbedb23b89b4496aba995340251c393-1 {}] regionserver.HStore(327): Store=9dbedb23b89b4496aba995340251c393/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T04:27:54,377 INFO [StoreOpener-9dbedb23b89b4496aba995340251c393-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 9dbedb23b89b4496aba995340251c393 2024-12-11T04:27:54,378 INFO [StoreOpener-9dbedb23b89b4496aba995340251c393-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T04:27:54,378 INFO [StoreOpener-9dbedb23b89b4496aba995340251c393-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9dbedb23b89b4496aba995340251c393 columnFamilyName C 2024-12-11T04:27:54,378 DEBUG [StoreOpener-9dbedb23b89b4496aba995340251c393-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:54,378 INFO [StoreOpener-9dbedb23b89b4496aba995340251c393-1 {}] regionserver.HStore(327): Store=9dbedb23b89b4496aba995340251c393/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T04:27:54,378 INFO [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=110}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:27:54,379 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=110}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393 2024-12-11T04:27:54,379 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=110}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393 2024-12-11T04:27:54,380 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=110}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-11T04:27:54,381 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=110}] regionserver.HRegion(1085): writing seq id for 9dbedb23b89b4496aba995340251c393 2024-12-11T04:27:54,383 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=110}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T04:27:54,383 INFO [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=110}] regionserver.HRegion(1102): Opened 9dbedb23b89b4496aba995340251c393; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67635519, jitterRate=0.007847771048545837}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-11T04:27:54,384 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=110}] regionserver.HRegion(1001): Region open journal for 9dbedb23b89b4496aba995340251c393: 2024-12-11T04:27:54,384 INFO [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=110}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393., pid=110, masterSystemTime=1733891274368 2024-12-11T04:27:54,385 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=110}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:27:54,386 INFO [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=110}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 
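[Editorial note] Once the region is reported open and the CREATE procedure finishes, the trailing entries show the test opening a fresh client connection (a ReadOnlyZKClient session plus RPC connections to the master and region server). A minimal sketch of equivalent client usage, assuming default configuration; the row key, qualifier, and value below are illustrative and not taken from the test:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ClientConnectionSketch {
        public static void main(String[] args) throws Exception {
            // Opening the connection is what triggers the ZooKeeper session and RPC
            // connection setup visible at the tail of the log.
            try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                byte[] row = Bytes.toBytes("row-0");   // hypothetical row key
                Put put = new Put(row);
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("q"), Bytes.toBytes("value"));
                table.put(put);

                Result result = table.get(new Get(row));
                System.out.println(Bytes.toString(result.getValue(Bytes.toBytes("A"), Bytes.toBytes("q"))));
            }
        }
    }
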
2024-12-11T04:27:54,386 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=109 updating hbase:meta row=9dbedb23b89b4496aba995340251c393, regionState=OPEN, openSeqNum=2, regionLocation=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:54,388 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=110, resume processing ppid=109 2024-12-11T04:27:54,388 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, ppid=109, state=SUCCESS; OpenRegionProcedure 9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 in 170 msec 2024-12-11T04:27:54,389 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=108 2024-12-11T04:27:54,389 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=108, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=9dbedb23b89b4496aba995340251c393, ASSIGN in 324 msec 2024-12-11T04:27:54,389 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=108, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T04:27:54,389 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733891274389"}]},"ts":"1733891274389"} 2024-12-11T04:27:54,390 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-11T04:27:54,392 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=108, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T04:27:54,393 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1510 sec 2024-12-11T04:27:55,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-11T04:27:55,347 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 108 completed 2024-12-11T04:27:55,348 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x62f74604 to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7ec15031 2024-12-11T04:27:55,352 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2df33cdf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:27:55,353 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:27:55,354 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44958, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:27:55,355 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-11T04:27:55,356 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52644, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-11T04:27:55,357 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-11T04:27:55,357 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T04:27:55,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=111, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-11T04:27:55,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742200_1376 (size=999) 2024-12-11T04:27:55,768 DEBUG [PEWorker-2 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-11T04:27:55,768 INFO [PEWorker-2 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-11T04:27:55,770 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=112, ppid=111, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-11T04:27:55,772 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9dbedb23b89b4496aba995340251c393, REOPEN/MOVE}] 2024-12-11T04:27:55,772 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=113, ppid=112, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9dbedb23b89b4496aba995340251c393, REOPEN/MOVE 2024-12-11T04:27:55,773 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=113 updating hbase:meta row=9dbedb23b89b4496aba995340251c393, regionState=CLOSING, regionLocation=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:55,773 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T04:27:55,773 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=114, ppid=113, state=RUNNABLE; CloseRegionProcedure 9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267}] 2024-12-11T04:27:55,925 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:55,925 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] handler.UnassignRegionHandler(124): Close 9dbedb23b89b4496aba995340251c393 2024-12-11T04:27:55,925 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-11T04:27:55,925 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1681): Closing 9dbedb23b89b4496aba995340251c393, disabling compactions & flushes 2024-12-11T04:27:55,925 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:27:55,925 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:27:55,925 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. after waiting 0 ms 2024-12-11T04:27:55,925 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 
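The ALTER recorded above (pid=111) rewrites the TestAcidGuarantees descriptor so that family A becomes a MOB family (IS_MOB => 'true', MOB_THRESHOLD => '4'), after which the master reopens the region via ReopenTableRegionsProcedure pid=112 and its child procedures. A rough sketch of how such a modification is issued through the standard 2.x Admin API (not the test's actual driver code):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class EnableMobOnFamilyASketch {
  public static void enableMob(Connection connection) throws Exception {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Admin admin = connection.getAdmin()) {
      TableDescriptor current = admin.getDescriptor(table);
      TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
          .modifyColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
              .setMobEnabled(true)   // IS_MOB => 'true'
              .setMobThreshold(4L)   // MOB_THRESHOLD => '4'
              .build())
          .build();
      // The master runs ModifyTableProcedure and schedules the region reopen
      // recorded above before this call returns.
      admin.modifyTable(modified);
    }
  }
}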
2024-12-11T04:27:55,929 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-11T04:27:55,929 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:27:55,929 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1635): Region close journal for 9dbedb23b89b4496aba995340251c393: 2024-12-11T04:27:55,929 WARN [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegionServer(3786): Not adding moved region record: 9dbedb23b89b4496aba995340251c393 to self. 2024-12-11T04:27:55,931 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] handler.UnassignRegionHandler(170): Closed 9dbedb23b89b4496aba995340251c393 2024-12-11T04:27:55,931 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=113 updating hbase:meta row=9dbedb23b89b4496aba995340251c393, regionState=CLOSED 2024-12-11T04:27:55,933 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=114, resume processing ppid=113 2024-12-11T04:27:55,933 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, ppid=113, state=SUCCESS; CloseRegionProcedure 9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 in 159 msec 2024-12-11T04:27:55,933 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=113, ppid=112, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=9dbedb23b89b4496aba995340251c393, REOPEN/MOVE; state=CLOSED, location=5f466b3719ec,39071,1733891180267; forceNewPlan=false, retain=true 2024-12-11T04:27:56,084 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=113 updating hbase:meta row=9dbedb23b89b4496aba995340251c393, regionState=OPENING, regionLocation=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:56,085 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=115, ppid=113, state=RUNNABLE; OpenRegionProcedure 9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267}] 2024-12-11T04:27:56,236 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:56,238 INFO [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=115}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 
2024-12-11T04:27:56,239 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=115}] regionserver.HRegion(7285): Opening region: {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} 2024-12-11T04:27:56,239 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=115}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 9dbedb23b89b4496aba995340251c393 2024-12-11T04:27:56,239 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=115}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T04:27:56,239 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=115}] regionserver.HRegion(7327): checking encryption for 9dbedb23b89b4496aba995340251c393 2024-12-11T04:27:56,239 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=115}] regionserver.HRegion(7330): checking classloading for 9dbedb23b89b4496aba995340251c393 2024-12-11T04:27:56,241 INFO [StoreOpener-9dbedb23b89b4496aba995340251c393-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 9dbedb23b89b4496aba995340251c393 2024-12-11T04:27:56,241 INFO [StoreOpener-9dbedb23b89b4496aba995340251c393-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T04:27:56,242 INFO [StoreOpener-9dbedb23b89b4496aba995340251c393-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9dbedb23b89b4496aba995340251c393 columnFamilyName A 2024-12-11T04:27:56,243 DEBUG [StoreOpener-9dbedb23b89b4496aba995340251c393-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:56,243 INFO [StoreOpener-9dbedb23b89b4496aba995340251c393-1 {}] regionserver.HStore(327): Store=9dbedb23b89b4496aba995340251c393/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T04:27:56,243 INFO [StoreOpener-9dbedb23b89b4496aba995340251c393-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 9dbedb23b89b4496aba995340251c393 2024-12-11T04:27:56,244 INFO [StoreOpener-9dbedb23b89b4496aba995340251c393-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T04:27:56,244 INFO [StoreOpener-9dbedb23b89b4496aba995340251c393-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9dbedb23b89b4496aba995340251c393 columnFamilyName B 2024-12-11T04:27:56,244 DEBUG [StoreOpener-9dbedb23b89b4496aba995340251c393-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:56,244 INFO [StoreOpener-9dbedb23b89b4496aba995340251c393-1 {}] regionserver.HStore(327): Store=9dbedb23b89b4496aba995340251c393/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T04:27:56,244 INFO [StoreOpener-9dbedb23b89b4496aba995340251c393-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 9dbedb23b89b4496aba995340251c393 2024-12-11T04:27:56,245 INFO [StoreOpener-9dbedb23b89b4496aba995340251c393-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T04:27:56,245 INFO [StoreOpener-9dbedb23b89b4496aba995340251c393-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9dbedb23b89b4496aba995340251c393 columnFamilyName C 2024-12-11T04:27:56,245 DEBUG [StoreOpener-9dbedb23b89b4496aba995340251c393-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:56,245 INFO [StoreOpener-9dbedb23b89b4496aba995340251c393-1 {}] regionserver.HStore(327): Store=9dbedb23b89b4496aba995340251c393/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T04:27:56,245 INFO [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=115}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:27:56,246 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=115}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393 2024-12-11T04:27:56,247 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=115}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393 2024-12-11T04:27:56,248 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=115}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-11T04:27:56,249 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=115}] regionserver.HRegion(1085): writing seq id for 9dbedb23b89b4496aba995340251c393 2024-12-11T04:27:56,250 INFO [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=115}] regionserver.HRegion(1102): Opened 9dbedb23b89b4496aba995340251c393; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62724475, jitterRate=-0.06533248722553253}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-11T04:27:56,251 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=115}] regionserver.HRegion(1001): Region open journal for 9dbedb23b89b4496aba995340251c393: 2024-12-11T04:27:56,251 INFO [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=115}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393., pid=115, masterSystemTime=1733891276236 2024-12-11T04:27:56,252 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=115}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:27:56,252 INFO [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=115}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 
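The burst of ReadOnlyZKClient / AbstractRpcClient entries that follows is the test opening its client connections against the freshly reopened region. A minimal sketch, assuming a standard 2.x client rather than the actual TestAcidGuarantees workload, of a put that touches the three families (the row test_row_0 and qualifier col10 are the ones that later appear in the flush output):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class WriterClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      for (String family : new String[] {"A", "B", "C"}) {
        // Cell layout mirrors the keys visible in the flush output, e.g. test_row_0/A:col10
        put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      }
      table.put(put);
    }
  }
}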
2024-12-11T04:27:56,253 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=113 updating hbase:meta row=9dbedb23b89b4496aba995340251c393, regionState=OPEN, openSeqNum=5, regionLocation=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:56,255 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=115, resume processing ppid=113 2024-12-11T04:27:56,255 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, ppid=113, state=SUCCESS; OpenRegionProcedure 9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 in 169 msec 2024-12-11T04:27:56,256 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-12-11T04:27:56,256 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=9dbedb23b89b4496aba995340251c393, REOPEN/MOVE in 483 msec 2024-12-11T04:27:56,257 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=112, resume processing ppid=111 2024-12-11T04:27:56,257 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, ppid=111, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 486 msec 2024-12-11T04:27:56,259 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 901 msec 2024-12-11T04:27:56,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=111 2024-12-11T04:27:56,260 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x49e13594 to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3dd5b441 2024-12-11T04:27:56,267 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9f472e0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:27:56,268 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2c54a0d3 to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3c336ea4 2024-12-11T04:27:56,271 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@167a78b0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:27:56,272 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3875c8c5 to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1f94d721 2024-12-11T04:27:56,276 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5aee939b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:27:56,277 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0801ba40 
to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@319559be 2024-12-11T04:27:56,282 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f49665c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:27:56,283 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x27539bdc to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3c907e21 2024-12-11T04:27:56,286 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@683f8469, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:27:56,286 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x61ec0f48 to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@75e4d3d0 2024-12-11T04:27:56,291 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37ec8e3b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:27:56,292 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7819b9e2 to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2b308f62 2024-12-11T04:27:56,299 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@787e5169, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:27:56,299 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x47679076 to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@68035c67 2024-12-11T04:27:56,302 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@627cad17, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:27:56,303 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4cb9e50e to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3eab689a 2024-12-11T04:27:56,308 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@39387e4d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:27:56,309 DEBUG [Time-limited 
test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3cb726fe to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@59bd764a 2024-12-11T04:27:56,311 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@238db126, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:27:56,314 DEBUG [hconnection-0x4e3bebff-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:27:56,315 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44970, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:27:56,318 DEBUG [hconnection-0x17d93037-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:27:56,318 DEBUG [hconnection-0x4f281006-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:27:56,319 DEBUG [hconnection-0xa968a3b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:27:56,319 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44982, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:27:56,319 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44980, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:27:56,320 DEBUG [hconnection-0x63048f73-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:27:56,320 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44988, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:27:56,320 DEBUG [hconnection-0x7f6dc6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:27:56,320 DEBUG [hconnection-0x5b54ecf1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:27:56,321 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44998, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:27:56,321 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45004, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:27:56,322 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45012, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:27:56,323 DEBUG [hconnection-0x75f8e629-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:27:56,323 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45020, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins 
(auth:SIMPLE), service=ClientService 2024-12-11T04:27:56,324 DEBUG [hconnection-0x33f275cd-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:27:56,325 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45024, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:27:56,326 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:27:56,326 DEBUG [hconnection-0x36387f53-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:27:56,327 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45032, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:27:56,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees 2024-12-11T04:27:56,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-11T04:27:56,328 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:27:56,329 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:27:56,329 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:27:56,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 9dbedb23b89b4496aba995340251c393 2024-12-11T04:27:56,335 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9dbedb23b89b4496aba995340251c393 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-11T04:27:56,336 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=A 2024-12-11T04:27:56,336 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:56,336 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=B 2024-12-11T04:27:56,336 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:56,336 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=C 2024-12-11T04:27:56,336 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:56,363 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding 
memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:56,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891336357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:56,363 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:56,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891336357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:56,363 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:56,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891336358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:56,364 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:56,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891336363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:56,363 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:56,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891336357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:56,378 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412117e7b7ae515cd44e291e5ea5e5e3ff792_9dbedb23b89b4496aba995340251c393 is 50, key is test_row_0/A:col10/1733891276330/Put/seqid=0 2024-12-11T04:27:56,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742201_1377 (size=12154) 2024-12-11T04:27:56,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-11T04:27:56,467 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:56,467 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:56,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891336464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:56,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891336464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:56,467 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:56,467 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:56,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891336465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:56,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891336465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:56,467 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:56,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891336465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:56,480 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:56,481 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-11T04:27:56,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:27:56,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. as already flushing 2024-12-11T04:27:56,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:27:56,481 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:56,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:56,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:56,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-11T04:27:56,633 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:56,634 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-11T04:27:56,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:27:56,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. as already flushing 2024-12-11T04:27:56,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:27:56,634 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
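The pid=117 entries above show the master's flush procedure being re-dispatched to the region server, which keeps answering that the region is already flushing, so each attempt is reported back as an IOException ("Unable to complete flush") until the in-flight flush finishes. The flush itself is an ordinary admin request; a minimal client-side sketch that would trigger the same RS_FLUSH_REGIONS path is shown below, reusing the table name from this log and assuming the hbase-site.xml on the classpath points at the running cluster.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestExample {
  public static void main(String[] args) throws Exception {
    // Connect using whatever hbase-site.xml is on the classpath (assumption: it
    // points at the cluster whose log is shown here).
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Submits a flush procedure on the master; the master then dispatches
      // FlushRegionCallable to the region server(s) hosting the table's regions.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}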
2024-12-11T04:27:56,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:56,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:56,671 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:56,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891336668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:56,672 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:56,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891336668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:56,672 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:56,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891336669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:56,672 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:56,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891336669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:56,672 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:56,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891336669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:56,787 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:56,788 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-11T04:27:56,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:27:56,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. as already flushing 2024-12-11T04:27:56,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:27:56,789 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:56,789 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
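The "Over memstore limit=512.0 K" rejections above come from HRegion.checkResources, which blocks new writes once a region's memstore exceeds its blocking ceiling, i.e. hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The excerpt does not show which values this test configured, so the numbers in the sketch below are illustrative only; they merely reproduce a 512 KB ceiling like the one reported here.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Illustrative values (not taken from the log): a 128 KB flush threshold with
    // the default 4x blocking multiplier gives a 512 KB per-region blocking limit.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Per-region memstore blocking limit (bytes): " + blockingLimit);
  }
}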
2024-12-11T04:27:56,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:56,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:56,794 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412117e7b7ae515cd44e291e5ea5e5e3ff792_9dbedb23b89b4496aba995340251c393 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412117e7b7ae515cd44e291e5ea5e5e3ff792_9dbedb23b89b4496aba995340251c393 2024-12-11T04:27:56,795 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/215977e3fc384d1cbdc0f368ec9fd434, store: [table=TestAcidGuarantees family=A region=9dbedb23b89b4496aba995340251c393] 2024-12-11T04:27:56,796 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/215977e3fc384d1cbdc0f368ec9fd434 is 175, key is test_row_0/A:col10/1733891276330/Put/seqid=0 2024-12-11T04:27:56,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742202_1378 (size=30955) 2024-12-11T04:27:56,885 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-11T04:27:56,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-11T04:27:56,941 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:56,942 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-11T04:27:56,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:27:56,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. as already flushing 2024-12-11T04:27:56,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 
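Each RegionTooBusyException above is thrown back to the writer's Mutate call, which the standard HBase client normally retries internally until the memstore drains below the limit. For a writer that wants explicit control once those built-in retries are exhausted, a minimal sketch is shown below, reusing the row, family and qualifier visible in this log; the value, attempt count and backoff figures are illustrative assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100L;                    // illustrative starting backoff
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);                       // may surface RegionTooBusyException
          break;                                // write accepted
        } catch (RegionTooBusyException busy) {
          // Region memstore is over its blocking limit; give the flush time to finish.
          Thread.sleep(backoffMs);
          backoffMs *= 2;                       // exponential backoff between attempts
        }
      }
    }
  }
}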
2024-12-11T04:27:56,942 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:56,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:56,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:56,976 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:56,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891336974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:56,977 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:56,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891336974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:56,977 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:56,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891336974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:56,978 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:56,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891336975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:56,978 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:56,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891336975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:57,094 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:57,095 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-11T04:27:57,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:27:57,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. as already flushing 2024-12-11T04:27:57,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:27:57,095 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:57,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:57,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:57,201 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=17, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/215977e3fc384d1cbdc0f368ec9fd434 2024-12-11T04:27:57,221 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/06380b79b0874ac8b2c49d1e31bcc466 is 50, key is test_row_0/B:col10/1733891276330/Put/seqid=0 2024-12-11T04:27:57,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742203_1379 (size=12001) 2024-12-11T04:27:57,247 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:57,247 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-11T04:27:57,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:27:57,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 
as already flushing 2024-12-11T04:27:57,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:27:57,248 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:57,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:57,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:57,399 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:57,399 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-11T04:27:57,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:27:57,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. as already flushing 2024-12-11T04:27:57,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:27:57,400 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:57,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:57,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:57,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-11T04:27:57,483 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:57,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891337481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:57,483 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:57,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891337481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:57,485 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:57,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891337482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:57,486 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:57,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891337482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:57,486 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:57,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891337482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:57,552 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:57,552 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-11T04:27:57,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:27:57,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. as already flushing 2024-12-11T04:27:57,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:27:57,553 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:57,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:57,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:27:57,627 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/06380b79b0874ac8b2c49d1e31bcc466 2024-12-11T04:27:57,648 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/71dc7e3d953b4e90b171cf1236c15b3f is 50, key is test_row_0/C:col10/1733891276330/Put/seqid=0 2024-12-11T04:27:57,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742204_1380 (size=12001) 2024-12-11T04:27:57,653 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/71dc7e3d953b4e90b171cf1236c15b3f 2024-12-11T04:27:57,657 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/215977e3fc384d1cbdc0f368ec9fd434 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/215977e3fc384d1cbdc0f368ec9fd434 2024-12-11T04:27:57,660 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/215977e3fc384d1cbdc0f368ec9fd434, entries=150, sequenceid=17, filesize=30.2 K 2024-12-11T04:27:57,661 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/06380b79b0874ac8b2c49d1e31bcc466 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/06380b79b0874ac8b2c49d1e31bcc466 2024-12-11T04:27:57,665 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/06380b79b0874ac8b2c49d1e31bcc466, entries=150, sequenceid=17, filesize=11.7 K 2024-12-11T04:27:57,665 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/71dc7e3d953b4e90b171cf1236c15b3f as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/71dc7e3d953b4e90b171cf1236c15b3f 2024-12-11T04:27:57,669 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/71dc7e3d953b4e90b171cf1236c15b3f, entries=150, sequenceid=17, filesize=11.7 K 2024-12-11T04:27:57,670 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 9dbedb23b89b4496aba995340251c393 in 1334ms, sequenceid=17, compaction requested=false 2024-12-11T04:27:57,670 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-11T04:27:57,670 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9dbedb23b89b4496aba995340251c393: 2024-12-11T04:27:57,705 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:27:57,705 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-11T04:27:57,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 
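The burst of RegionTooBusyException warnings above is the region server rejecting new Mutate calls while region 9dbedb23b89b4496aba995340251c393 sits over its memstore blocking limit and a flush is still in flight. The sketch below is illustrative only: the table, family and qualifier names mirror the log, but the retry loop is an assumption rather than anything the test does, and in practice the HBase client normally retries these failures itself (governed by hbase.client.retries.number and hbase.client.pause), so the exception may only surface once those retries are exhausted.

// Hedged sketch: back off and retry a put that is rejected with RegionTooBusyException.
// Table/family/qualifier mirror the log; the retry policy is an illustrative assumption.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long pauseMs = 100;                    // initial back-off
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);                    // may be rejected while the memstore is over limit
          break;                             // success
        } catch (RegionTooBusyException e) {
          Thread.sleep(pauseMs);             // give the in-flight flush time to finish
          pauseMs *= 2;                      // exponential back-off before the next attempt
        }
      }
    }
  }
}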
2024-12-11T04:27:57,705 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2837): Flushing 9dbedb23b89b4496aba995340251c393 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-11T04:27:57,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=A 2024-12-11T04:27:57,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:57,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=B 2024-12-11T04:27:57,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:57,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=C 2024-12-11T04:27:57,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:57,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121180f2f1d0905c4f77a3ad3d9d96038eb2_9dbedb23b89b4496aba995340251c393 is 50, key is test_row_0/A:col10/1733891276353/Put/seqid=0 2024-12-11T04:27:57,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742205_1381 (size=12154) 2024-12-11T04:27:58,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:58,147 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121180f2f1d0905c4f77a3ad3d9d96038eb2_9dbedb23b89b4496aba995340251c393 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121180f2f1d0905c4f77a3ad3d9d96038eb2_9dbedb23b89b4496aba995340251c393 2024-12-11T04:27:58,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/22c3698675d8466a8911378b9ce89d26, store: [table=TestAcidGuarantees family=A region=9dbedb23b89b4496aba995340251c393] 2024-12-11T04:27:58,148 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/22c3698675d8466a8911378b9ce89d26 is 175, key is test_row_0/A:col10/1733891276353/Put/seqid=0 2024-12-11T04:27:58,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742206_1382 (size=30955) 2024-12-11T04:27:58,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-11T04:27:58,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 9dbedb23b89b4496aba995340251c393 2024-12-11T04:27:58,489 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. as already flushing 2024-12-11T04:27:58,506 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:58,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891338500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:58,506 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:58,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891338500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:58,506 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:58,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891338501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:58,506 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:58,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891338502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:58,507 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:58,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891338502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:58,554 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=40, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/22c3698675d8466a8911378b9ce89d26 2024-12-11T04:27:58,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/77d6f8db740a46639986b6f181caa194 is 50, key is test_row_0/B:col10/1733891276353/Put/seqid=0 2024-12-11T04:27:58,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742207_1383 (size=12001) 2024-12-11T04:27:58,570 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/77d6f8db740a46639986b6f181caa194 2024-12-11T04:27:58,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/587b0526e2f343f6a8ffc3f7116aa483 is 50, key is test_row_0/C:col10/1733891276353/Put/seqid=0 2024-12-11T04:27:58,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742208_1384 (size=12001) 2024-12-11T04:27:58,591 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/587b0526e2f343f6a8ffc3f7116aa483 2024-12-11T04:27:58,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/22c3698675d8466a8911378b9ce89d26 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/22c3698675d8466a8911378b9ce89d26 2024-12-11T04:27:58,599 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/22c3698675d8466a8911378b9ce89d26, entries=150, sequenceid=40, filesize=30.2 K 2024-12-11T04:27:58,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/77d6f8db740a46639986b6f181caa194 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/77d6f8db740a46639986b6f181caa194 2024-12-11T04:27:58,603 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/77d6f8db740a46639986b6f181caa194, entries=150, sequenceid=40, filesize=11.7 K 2024-12-11T04:27:58,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/587b0526e2f343f6a8ffc3f7116aa483 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/587b0526e2f343f6a8ffc3f7116aa483 2024-12-11T04:27:58,608 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:58,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891338607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:58,609 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:58,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891338607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:58,609 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:58,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891338608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:58,611 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:58,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891338609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:58,611 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:58,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891338609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:58,612 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/587b0526e2f343f6a8ffc3f7116aa483, entries=150, sequenceid=40, filesize=11.7 K 2024-12-11T04:27:58,613 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 9dbedb23b89b4496aba995340251c393 in 908ms, sequenceid=40, compaction requested=false 2024-12-11T04:27:58,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2538): Flush status journal for 9dbedb23b89b4496aba995340251c393: 2024-12-11T04:27:58,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 
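The 512.0 K figure in the RegionTooBusyException messages is the region's memstore blocking limit, which the HRegion.checkResources check seen in the traces derives from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the test evidently lowers the flush size so that limit is reached quickly under the write load. The sketch below only illustrates that arithmetic; the 128 K flush size and multiplier of 4 are assumptions chosen to reproduce the 512 K value, not values read from the test configuration.

// Hedged sketch: how a blocking limit like the 512 K in the log could follow from config.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // assumed 128 K (default is 128 MB)
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // 4 is the usual default
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 134217728L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    // Writes to the region are rejected with RegionTooBusyException above this size.
    System.out.println("memstore blocking limit = " + (blockingLimit / 1024) + " K"); // 512 K
  }
}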
2024-12-11T04:27:58,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=117 2024-12-11T04:27:58,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=117 2024-12-11T04:27:58,615 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=116 2024-12-11T04:27:58,616 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2850 sec 2024-12-11T04:27:58,617 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees in 2.2900 sec 2024-12-11T04:27:58,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 9dbedb23b89b4496aba995340251c393 2024-12-11T04:27:58,814 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9dbedb23b89b4496aba995340251c393 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-11T04:27:58,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=A 2024-12-11T04:27:58,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:58,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=B 2024-12-11T04:27:58,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:58,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=C 2024-12-11T04:27:58,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:58,823 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211df253c39a4bc49818e43a60baca16bdb_9dbedb23b89b4496aba995340251c393 is 50, key is test_row_0/A:col10/1733891278814/Put/seqid=0 2024-12-11T04:27:58,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742209_1385 (size=19474) 2024-12-11T04:27:58,832 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:58,836 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211df253c39a4bc49818e43a60baca16bdb_9dbedb23b89b4496aba995340251c393 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211df253c39a4bc49818e43a60baca16bdb_9dbedb23b89b4496aba995340251c393 2024-12-11T04:27:58,837 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/0bf240d3a1c543dd86f5c494835d6fef, store: [table=TestAcidGuarantees family=A region=9dbedb23b89b4496aba995340251c393] 2024-12-11T04:27:58,838 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/0bf240d3a1c543dd86f5c494835d6fef is 175, key is test_row_0/A:col10/1733891278814/Put/seqid=0 2024-12-11T04:27:58,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742210_1386 (size=56733) 2024-12-11T04:27:58,846 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=56, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/0bf240d3a1c543dd86f5c494835d6fef 2024-12-11T04:27:58,855 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/0c85f7bf5b9d4f5fae3955ac151bbada is 50, key is test_row_0/B:col10/1733891278814/Put/seqid=0 2024-12-11T04:27:58,857 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:58,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891338847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:58,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742211_1387 (size=12001) 2024-12-11T04:27:58,862 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/0c85f7bf5b9d4f5fae3955ac151bbada 2024-12-11T04:27:58,866 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:58,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891338856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:58,866 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:58,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891338857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:58,866 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:58,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891338857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:58,867 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:58,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891338858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:58,870 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/aa56048ab6b243bfa39e00add912f90d is 50, key is test_row_0/C:col10/1733891278814/Put/seqid=0 2024-12-11T04:27:58,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742212_1388 (size=12001) 2024-12-11T04:27:58,882 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/aa56048ab6b243bfa39e00add912f90d 2024-12-11T04:27:58,886 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/0bf240d3a1c543dd86f5c494835d6fef as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/0bf240d3a1c543dd86f5c494835d6fef 2024-12-11T04:27:58,889 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/0bf240d3a1c543dd86f5c494835d6fef, entries=300, sequenceid=56, filesize=55.4 K 2024-12-11T04:27:58,890 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/0c85f7bf5b9d4f5fae3955ac151bbada as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/0c85f7bf5b9d4f5fae3955ac151bbada 2024-12-11T04:27:58,893 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/0c85f7bf5b9d4f5fae3955ac151bbada, entries=150, sequenceid=56, filesize=11.7 K 2024-12-11T04:27:58,894 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/aa56048ab6b243bfa39e00add912f90d as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/aa56048ab6b243bfa39e00add912f90d 2024-12-11T04:27:58,898 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/aa56048ab6b243bfa39e00add912f90d, entries=150, sequenceid=56, filesize=11.7 K 2024-12-11T04:27:58,899 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=127.47 KB/130530 for 9dbedb23b89b4496aba995340251c393 in 85ms, sequenceid=56, compaction requested=true 2024-12-11T04:27:58,899 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9dbedb23b89b4496aba995340251c393: 2024-12-11T04:27:58,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9dbedb23b89b4496aba995340251c393:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:27:58,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:58,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9dbedb23b89b4496aba995340251c393:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:27:58,899 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:27:58,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:58,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9dbedb23b89b4496aba995340251c393:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:27:58,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:27:58,900 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:27:58,901 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:27:58,901 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 118643 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:27:58,901 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): 9dbedb23b89b4496aba995340251c393/B is initiating minor compaction (all files) 2024-12-11T04:27:58,901 DEBUG 
[RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): 9dbedb23b89b4496aba995340251c393/A is initiating minor compaction (all files) 2024-12-11T04:27:58,901 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9dbedb23b89b4496aba995340251c393/B in TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:27:58,901 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/06380b79b0874ac8b2c49d1e31bcc466, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/77d6f8db740a46639986b6f181caa194, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/0c85f7bf5b9d4f5fae3955ac151bbada] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp, totalSize=35.2 K 2024-12-11T04:27:58,901 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9dbedb23b89b4496aba995340251c393/A in TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:27:58,901 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/215977e3fc384d1cbdc0f368ec9fd434, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/22c3698675d8466a8911378b9ce89d26, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/0bf240d3a1c543dd86f5c494835d6fef] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp, totalSize=115.9 K 2024-12-11T04:27:58,901 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:27:58,901 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 06380b79b0874ac8b2c49d1e31bcc466, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1733891276330 2024-12-11T04:27:58,901 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 
files: [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/215977e3fc384d1cbdc0f368ec9fd434, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/22c3698675d8466a8911378b9ce89d26, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/0bf240d3a1c543dd86f5c494835d6fef] 2024-12-11T04:27:58,902 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 77d6f8db740a46639986b6f181caa194, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1733891276353 2024-12-11T04:27:58,902 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 215977e3fc384d1cbdc0f368ec9fd434, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1733891276330 2024-12-11T04:27:58,902 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 0c85f7bf5b9d4f5fae3955ac151bbada, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1733891278501 2024-12-11T04:27:58,902 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 22c3698675d8466a8911378b9ce89d26, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1733891276353 2024-12-11T04:27:58,903 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0bf240d3a1c543dd86f5c494835d6fef, keycount=300, bloomtype=ROW, size=55.4 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1733891278495 2024-12-11T04:27:58,909 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=9dbedb23b89b4496aba995340251c393] 2024-12-11T04:27:58,910 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9dbedb23b89b4496aba995340251c393#B#compaction#333 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:27:58,910 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/35dec10af7ef45888bca493a4aa46a4f is 50, key is test_row_0/B:col10/1733891278814/Put/seqid=0 2024-12-11T04:27:58,912 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241211fb91ed653cd54f038d7d27a07825f50a_9dbedb23b89b4496aba995340251c393 store=[table=TestAcidGuarantees family=A region=9dbedb23b89b4496aba995340251c393] 2024-12-11T04:27:58,914 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241211fb91ed653cd54f038d7d27a07825f50a_9dbedb23b89b4496aba995340251c393, store=[table=TestAcidGuarantees family=A region=9dbedb23b89b4496aba995340251c393] 2024-12-11T04:27:58,914 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211fb91ed653cd54f038d7d27a07825f50a_9dbedb23b89b4496aba995340251c393 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=9dbedb23b89b4496aba995340251c393] 2024-12-11T04:27:58,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742213_1389 (size=12104) 2024-12-11T04:27:58,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742214_1390 (size=4469) 2024-12-11T04:27:58,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 9dbedb23b89b4496aba995340251c393 2024-12-11T04:27:58,962 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9dbedb23b89b4496aba995340251c393 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-11T04:27:58,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=A 2024-12-11T04:27:58,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:58,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=B 2024-12-11T04:27:58,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:58,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=C 2024-12-11T04:27:58,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:27:58,970 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121121c5fa36fc53404cb4ce4dba4112bea6_9dbedb23b89b4496aba995340251c393 is 50, key is test_row_0/A:col10/1733891278854/Put/seqid=0 2024-12-11T04:27:58,979 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742215_1391 (size=14594) 2024-12-11T04:27:58,988 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:58,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891338980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:58,992 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:58,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891338987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:58,993 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:58,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891338987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:58,997 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:58,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891338989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:58,998 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:58,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891338989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:59,092 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:59,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891339090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:59,100 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:59,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891339094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:59,100 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:59,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891339094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:59,101 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:59,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891339098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:59,101 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:59,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891339099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:59,297 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:59,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891339293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:59,305 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:59,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891339301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:59,305 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:59,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891339301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:59,305 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:59,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891339302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:59,306 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:59,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891339302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:59,322 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/35dec10af7ef45888bca493a4aa46a4f as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/35dec10af7ef45888bca493a4aa46a4f 2024-12-11T04:27:59,332 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9dbedb23b89b4496aba995340251c393#A#compaction#334 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:27:59,332 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/c88df1b2303e41e49fe8d4323c240e75 is 175, key is test_row_0/A:col10/1733891278814/Put/seqid=0 2024-12-11T04:27:59,334 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9dbedb23b89b4496aba995340251c393/B of 9dbedb23b89b4496aba995340251c393 into 35dec10af7ef45888bca493a4aa46a4f(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T04:27:59,334 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9dbedb23b89b4496aba995340251c393: 2024-12-11T04:27:59,334 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393., storeName=9dbedb23b89b4496aba995340251c393/B, priority=13, startTime=1733891278899; duration=0sec 2024-12-11T04:27:59,334 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:27:59,334 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9dbedb23b89b4496aba995340251c393:B 2024-12-11T04:27:59,334 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:27:59,335 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:27:59,335 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): 9dbedb23b89b4496aba995340251c393/C is initiating minor compaction (all files) 2024-12-11T04:27:59,335 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9dbedb23b89b4496aba995340251c393/C in TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:27:59,335 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/71dc7e3d953b4e90b171cf1236c15b3f, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/587b0526e2f343f6a8ffc3f7116aa483, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/aa56048ab6b243bfa39e00add912f90d] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp, totalSize=35.2 K 2024-12-11T04:27:59,335 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 71dc7e3d953b4e90b171cf1236c15b3f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1733891276330 2024-12-11T04:27:59,336 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 587b0526e2f343f6a8ffc3f7116aa483, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1733891276353 2024-12-11T04:27:59,336 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting aa56048ab6b243bfa39e00add912f90d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1733891278501 2024-12-11T04:27:59,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is 
added to blk_1073742216_1392 (size=31058) 2024-12-11T04:27:59,348 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9dbedb23b89b4496aba995340251c393#C#compaction#336 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:27:59,349 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/ec80a362ae4147cfb108d1aec3152e4d is 50, key is test_row_0/C:col10/1733891278814/Put/seqid=0 2024-12-11T04:27:59,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742217_1393 (size=12104) 2024-12-11T04:27:59,380 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:27:59,384 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121121c5fa36fc53404cb4ce4dba4112bea6_9dbedb23b89b4496aba995340251c393 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121121c5fa36fc53404cb4ce4dba4112bea6_9dbedb23b89b4496aba995340251c393 2024-12-11T04:27:59,385 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/58410824898f4228a1fbcbe61ea5b1ee, store: [table=TestAcidGuarantees family=A region=9dbedb23b89b4496aba995340251c393] 2024-12-11T04:27:59,386 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/58410824898f4228a1fbcbe61ea5b1ee is 175, key is test_row_0/A:col10/1733891278854/Put/seqid=0 2024-12-11T04:27:59,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742218_1394 (size=39549) 2024-12-11T04:27:59,599 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:59,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891339598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:59,609 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:59,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891339606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:59,610 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:59,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891339606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:59,610 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:59,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891339608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:59,611 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:27:59,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891339608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:27:59,753 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/c88df1b2303e41e49fe8d4323c240e75 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/c88df1b2303e41e49fe8d4323c240e75 2024-12-11T04:27:59,758 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/ec80a362ae4147cfb108d1aec3152e4d as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/ec80a362ae4147cfb108d1aec3152e4d 2024-12-11T04:27:59,759 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9dbedb23b89b4496aba995340251c393/A of 9dbedb23b89b4496aba995340251c393 into c88df1b2303e41e49fe8d4323c240e75(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T04:27:59,759 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9dbedb23b89b4496aba995340251c393: 2024-12-11T04:27:59,759 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393., storeName=9dbedb23b89b4496aba995340251c393/A, priority=13, startTime=1733891278899; duration=0sec 2024-12-11T04:27:59,759 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:59,759 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9dbedb23b89b4496aba995340251c393:A 2024-12-11T04:27:59,763 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9dbedb23b89b4496aba995340251c393/C of 9dbedb23b89b4496aba995340251c393 into ec80a362ae4147cfb108d1aec3152e4d(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:27:59,763 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9dbedb23b89b4496aba995340251c393: 2024-12-11T04:27:59,763 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393., storeName=9dbedb23b89b4496aba995340251c393/C, priority=13, startTime=1733891278900; duration=0sec 2024-12-11T04:27:59,763 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:27:59,763 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9dbedb23b89b4496aba995340251c393:C 2024-12-11T04:27:59,792 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=79, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/58410824898f4228a1fbcbe61ea5b1ee 2024-12-11T04:27:59,799 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/e1584d0957c447b1addbfe77b0736374 is 50, key is test_row_0/B:col10/1733891278854/Put/seqid=0 2024-12-11T04:27:59,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742219_1395 (size=12001) 2024-12-11T04:28:00,107 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:00,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891340103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:00,114 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:00,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891340110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:00,115 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:00,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891340111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:00,115 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:00,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891340113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:00,121 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:00,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891340116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:00,203 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/e1584d0957c447b1addbfe77b0736374 2024-12-11T04:28:00,210 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/021c3aedea0a419884dc630093177cae is 50, key is test_row_0/C:col10/1733891278854/Put/seqid=0 2024-12-11T04:28:00,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742220_1396 (size=12001) 2024-12-11T04:28:00,214 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/021c3aedea0a419884dc630093177cae 2024-12-11T04:28:00,218 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/58410824898f4228a1fbcbe61ea5b1ee as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/58410824898f4228a1fbcbe61ea5b1ee 2024-12-11T04:28:00,222 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/58410824898f4228a1fbcbe61ea5b1ee, entries=200, sequenceid=79, filesize=38.6 K 2024-12-11T04:28:00,222 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/e1584d0957c447b1addbfe77b0736374 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/e1584d0957c447b1addbfe77b0736374 2024-12-11T04:28:00,226 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/e1584d0957c447b1addbfe77b0736374, entries=150, sequenceid=79, filesize=11.7 K 2024-12-11T04:28:00,226 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/021c3aedea0a419884dc630093177cae as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/021c3aedea0a419884dc630093177cae 2024-12-11T04:28:00,230 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/021c3aedea0a419884dc630093177cae, entries=150, sequenceid=79, filesize=11.7 K 2024-12-11T04:28:00,230 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 9dbedb23b89b4496aba995340251c393 in 1268ms, sequenceid=79, compaction requested=false 2024-12-11T04:28:00,231 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9dbedb23b89b4496aba995340251c393: 2024-12-11T04:28:00,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-11T04:28:00,433 INFO [Thread-1693 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 116 completed 2024-12-11T04:28:00,434 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:28:00,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees 2024-12-11T04:28:00,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-11T04:28:00,436 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:28:00,437 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:28:00,437 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=118, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:28:00,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-11T04:28:00,588 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:00,589 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-11T04:28:00,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:00,589 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2837): Flushing 9dbedb23b89b4496aba995340251c393 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-11T04:28:00,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=A 2024-12-11T04:28:00,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:00,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=B 2024-12-11T04:28:00,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:00,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=C 2024-12-11T04:28:00,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:00,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121164689ff70dac420fba37ca1693f427f6_9dbedb23b89b4496aba995340251c393 is 50, key is test_row_0/A:col10/1733891278977/Put/seqid=0 2024-12-11T04:28:00,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742221_1397 (size=12154) 2024-12-11T04:28:00,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:00,604 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121164689ff70dac420fba37ca1693f427f6_9dbedb23b89b4496aba995340251c393 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121164689ff70dac420fba37ca1693f427f6_9dbedb23b89b4496aba995340251c393 
2024-12-11T04:28:00,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/dd5878b848f64419a21ced534c86a2e5, store: [table=TestAcidGuarantees family=A region=9dbedb23b89b4496aba995340251c393] 2024-12-11T04:28:00,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/dd5878b848f64419a21ced534c86a2e5 is 175, key is test_row_0/A:col10/1733891278977/Put/seqid=0 2024-12-11T04:28:00,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742222_1398 (size=30955) 2024-12-11T04:28:00,609 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=95, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/dd5878b848f64419a21ced534c86a2e5 2024-12-11T04:28:00,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/6323632cd2f144f08fd0c063c19b00bc is 50, key is test_row_0/B:col10/1733891278977/Put/seqid=0 2024-12-11T04:28:00,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742223_1399 (size=12001) 2024-12-11T04:28:00,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-11T04:28:01,023 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=95 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/6323632cd2f144f08fd0c063c19b00bc 2024-12-11T04:28:01,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/f9e098f42b8a4c44ba2931b332bc10c0 is 50, key is test_row_0/C:col10/1733891278977/Put/seqid=0 2024-12-11T04:28:01,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742224_1400 (size=12001) 2024-12-11T04:28:01,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-11T04:28:01,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(8581): 
Flush requested on 9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:01,119 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. as already flushing 2024-12-11T04:28:01,179 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:01,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891341172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:01,179 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:01,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891341173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:01,180 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:01,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891341176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:01,181 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:01,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891341178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:01,186 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:01,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891341179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:01,283 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:01,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891341280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:01,284 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:01,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891341280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:01,284 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:01,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891341281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:01,285 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:01,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891341282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:01,289 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:01,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891341287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:01,434 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=95 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/f9e098f42b8a4c44ba2931b332bc10c0 2024-12-11T04:28:01,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/dd5878b848f64419a21ced534c86a2e5 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/dd5878b848f64419a21ced534c86a2e5 2024-12-11T04:28:01,442 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/dd5878b848f64419a21ced534c86a2e5, entries=150, sequenceid=95, filesize=30.2 K 2024-12-11T04:28:01,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/6323632cd2f144f08fd0c063c19b00bc as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/6323632cd2f144f08fd0c063c19b00bc 2024-12-11T04:28:01,447 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/6323632cd2f144f08fd0c063c19b00bc, entries=150, sequenceid=95, filesize=11.7 K 2024-12-11T04:28:01,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/f9e098f42b8a4c44ba2931b332bc10c0 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/f9e098f42b8a4c44ba2931b332bc10c0 2024-12-11T04:28:01,451 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/f9e098f42b8a4c44ba2931b332bc10c0, entries=150, sequenceid=95, filesize=11.7 K 2024-12-11T04:28:01,452 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 9dbedb23b89b4496aba995340251c393 in 863ms, sequenceid=95, compaction requested=true 2024-12-11T04:28:01,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2538): Flush status journal for 9dbedb23b89b4496aba995340251c393: 2024-12-11T04:28:01,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:01,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=119 2024-12-11T04:28:01,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=119 2024-12-11T04:28:01,454 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=118 2024-12-11T04:28:01,454 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0160 sec 2024-12-11T04:28:01,455 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees in 1.0210 sec 2024-12-11T04:28:01,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:01,491 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9dbedb23b89b4496aba995340251c393 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-11T04:28:01,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=A 2024-12-11T04:28:01,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:01,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=B 2024-12-11T04:28:01,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:01,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
9dbedb23b89b4496aba995340251c393, store=C 2024-12-11T04:28:01,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:01,499 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211e47378d56a6c4124ada9c21b95be582d_9dbedb23b89b4496aba995340251c393 is 50, key is test_row_0/A:col10/1733891281491/Put/seqid=0 2024-12-11T04:28:01,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742225_1401 (size=12154) 2024-12-11T04:28:01,505 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:01,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891341499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:01,506 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:01,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891341499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:01,506 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:01,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891341500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:01,506 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:01,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891341500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:01,509 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:01,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891341506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:01,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-11T04:28:01,539 INFO [Thread-1693 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 118 completed 2024-12-11T04:28:01,540 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:28:01,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees 2024-12-11T04:28:01,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-11T04:28:01,542 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:28:01,542 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:28:01,542 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:28:01,608 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:01,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891341607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:01,608 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:01,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891341607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:01,609 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:01,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891341607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:01,612 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:01,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891341607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:01,613 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:01,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891341610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:01,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-11T04:28:01,694 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:01,694 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-11T04:28:01,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:01,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. as already flushing 2024-12-11T04:28:01,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:01,695 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:01,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:01,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:01,741 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-11T04:28:01,812 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:01,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891341809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:01,812 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:01,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891341810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:01,814 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:01,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891341810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:01,817 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:01,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891341813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:01,818 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:01,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891341815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:01,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-11T04:28:01,847 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:01,847 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-11T04:28:01,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:01,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. as already flushing 2024-12-11T04:28:01,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:01,848 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:01,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:01,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:01,904 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:01,908 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211e47378d56a6c4124ada9c21b95be582d_9dbedb23b89b4496aba995340251c393 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211e47378d56a6c4124ada9c21b95be582d_9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:01,909 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/e7cf6e6344c348359a2ce5282ae41a53, store: [table=TestAcidGuarantees family=A region=9dbedb23b89b4496aba995340251c393] 2024-12-11T04:28:01,910 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/e7cf6e6344c348359a2ce5282ae41a53 is 175, key is test_row_0/A:col10/1733891281491/Put/seqid=0 2024-12-11T04:28:01,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742226_1402 (size=30955) 2024-12-11T04:28:02,000 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:02,000 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] 
regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-11T04:28:02,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:02,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. as already flushing 2024-12-11T04:28:02,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:02,001 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:02,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:28:02,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:02,116 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:02,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891342115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:02,120 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:02,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891342117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:02,121 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:02,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891342117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:02,122 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:02,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891342118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:02,122 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:02,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891342119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:02,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-11T04:28:02,153 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:02,154 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-11T04:28:02,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:02,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. as already flushing 2024-12-11T04:28:02,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:02,154 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:02,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:02,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:02,307 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:02,308 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-11T04:28:02,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:02,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. as already flushing 2024-12-11T04:28:02,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:02,308 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:02,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:02,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:02,315 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=120, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/e7cf6e6344c348359a2ce5282ae41a53 2024-12-11T04:28:02,323 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/c62a4df7ca9048a2bcd68bb5dbeec7f6 is 50, key is test_row_0/B:col10/1733891281491/Put/seqid=0 2024-12-11T04:28:02,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742227_1403 (size=12001) 2024-12-11T04:28:02,329 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=120 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/c62a4df7ca9048a2bcd68bb5dbeec7f6 2024-12-11T04:28:02,336 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/1a709d26b2274678bf85c48552adeb7c is 50, key is test_row_0/C:col10/1733891281491/Put/seqid=0 2024-12-11T04:28:02,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742228_1404 (size=12001) 2024-12-11T04:28:02,460 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 
2024-12-11T04:28:02,461 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-11T04:28:02,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:02,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. as already flushing 2024-12-11T04:28:02,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:02,461 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:02,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:28:02,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:02,613 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:02,613 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-11T04:28:02,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:02,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 
as already flushing 2024-12-11T04:28:02,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:02,614 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:02,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:02,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:02,624 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:02,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891342618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:02,625 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:02,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891342625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:02,626 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:02,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891342625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:02,631 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:02,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891342626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:02,631 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:02,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891342626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:02,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-11T04:28:02,742 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=120 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/1a709d26b2274678bf85c48552adeb7c 2024-12-11T04:28:02,746 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/e7cf6e6344c348359a2ce5282ae41a53 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/e7cf6e6344c348359a2ce5282ae41a53 2024-12-11T04:28:02,751 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/e7cf6e6344c348359a2ce5282ae41a53, entries=150, sequenceid=120, filesize=30.2 K 2024-12-11T04:28:02,752 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/c62a4df7ca9048a2bcd68bb5dbeec7f6 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/c62a4df7ca9048a2bcd68bb5dbeec7f6 2024-12-11T04:28:02,756 INFO 
[MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/c62a4df7ca9048a2bcd68bb5dbeec7f6, entries=150, sequenceid=120, filesize=11.7 K 2024-12-11T04:28:02,756 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/1a709d26b2274678bf85c48552adeb7c as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/1a709d26b2274678bf85c48552adeb7c 2024-12-11T04:28:02,759 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/1a709d26b2274678bf85c48552adeb7c, entries=150, sequenceid=120, filesize=11.7 K 2024-12-11T04:28:02,760 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 9dbedb23b89b4496aba995340251c393 in 1269ms, sequenceid=120, compaction requested=true 2024-12-11T04:28:02,760 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9dbedb23b89b4496aba995340251c393: 2024-12-11T04:28:02,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9dbedb23b89b4496aba995340251c393:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:28:02,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:02,760 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T04:28:02,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9dbedb23b89b4496aba995340251c393:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:28:02,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:02,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9dbedb23b89b4496aba995340251c393:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:28:02,761 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T04:28:02,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:28:02,762 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T04:28:02,762 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 132517 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T04:28:02,762 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): 9dbedb23b89b4496aba995340251c393/B is initiating minor compaction (all files) 2024-12-11T04:28:02,762 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): 9dbedb23b89b4496aba995340251c393/A is initiating minor compaction (all files) 2024-12-11T04:28:02,762 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9dbedb23b89b4496aba995340251c393/B in TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:02,762 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9dbedb23b89b4496aba995340251c393/A in TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:02,762 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/35dec10af7ef45888bca493a4aa46a4f, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/e1584d0957c447b1addbfe77b0736374, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/6323632cd2f144f08fd0c063c19b00bc, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/c62a4df7ca9048a2bcd68bb5dbeec7f6] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp, totalSize=47.0 K 2024-12-11T04:28:02,762 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/c88df1b2303e41e49fe8d4323c240e75, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/58410824898f4228a1fbcbe61ea5b1ee, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/dd5878b848f64419a21ced534c86a2e5, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/e7cf6e6344c348359a2ce5282ae41a53] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp, totalSize=129.4 K 2024-12-11T04:28:02,762 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 
2024-12-11T04:28:02,762 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. files: [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/c88df1b2303e41e49fe8d4323c240e75, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/58410824898f4228a1fbcbe61ea5b1ee, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/dd5878b848f64419a21ced534c86a2e5, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/e7cf6e6344c348359a2ce5282ae41a53] 2024-12-11T04:28:02,762 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 35dec10af7ef45888bca493a4aa46a4f, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1733891278501 2024-12-11T04:28:02,763 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting c88df1b2303e41e49fe8d4323c240e75, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1733891278501 2024-12-11T04:28:02,763 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting e1584d0957c447b1addbfe77b0736374, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733891278854 2024-12-11T04:28:02,763 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 58410824898f4228a1fbcbe61ea5b1ee, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733891278854 2024-12-11T04:28:02,763 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 6323632cd2f144f08fd0c063c19b00bc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733891278977 2024-12-11T04:28:02,763 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting dd5878b848f64419a21ced534c86a2e5, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733891278977 2024-12-11T04:28:02,763 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting c62a4df7ca9048a2bcd68bb5dbeec7f6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1733891281175 2024-12-11T04:28:02,764 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting e7cf6e6344c348359a2ce5282ae41a53, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1733891281175 2024-12-11T04:28:02,766 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:02,766 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-11T04:28:02,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 
{event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:02,766 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2837): Flushing 9dbedb23b89b4496aba995340251c393 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-11T04:28:02,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=A 2024-12-11T04:28:02,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:02,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=B 2024-12-11T04:28:02,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:02,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=C 2024-12-11T04:28:02,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:02,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412114348cd7ac35545fb9f8bf2084bed4dce_9dbedb23b89b4496aba995340251c393 is 50, key is test_row_0/A:col10/1733891281504/Put/seqid=0 2024-12-11T04:28:02,784 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=9dbedb23b89b4496aba995340251c393] 2024-12-11T04:28:02,786 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9dbedb23b89b4496aba995340251c393#B#compaction#346 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:02,786 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/2e26aa8080a04c32bbb9ce9926619d5f is 50, key is test_row_0/B:col10/1733891281491/Put/seqid=0 2024-12-11T04:28:02,786 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241211e84267a8922b40aca13f762c02c207b1_9dbedb23b89b4496aba995340251c393 store=[table=TestAcidGuarantees family=A region=9dbedb23b89b4496aba995340251c393] 2024-12-11T04:28:02,789 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241211e84267a8922b40aca13f762c02c207b1_9dbedb23b89b4496aba995340251c393, store=[table=TestAcidGuarantees family=A region=9dbedb23b89b4496aba995340251c393] 2024-12-11T04:28:02,789 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211e84267a8922b40aca13f762c02c207b1_9dbedb23b89b4496aba995340251c393 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=9dbedb23b89b4496aba995340251c393] 2024-12-11T04:28:02,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742230_1406 (size=12241) 2024-12-11T04:28:02,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742229_1405 (size=12204) 2024-12-11T04:28:02,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:02,809 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412114348cd7ac35545fb9f8bf2084bed4dce_9dbedb23b89b4496aba995340251c393 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412114348cd7ac35545fb9f8bf2084bed4dce_9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:02,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/033be885eec64adcb8add079624bdaa2, store: [table=TestAcidGuarantees family=A region=9dbedb23b89b4496aba995340251c393] 2024-12-11T04:28:02,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/033be885eec64adcb8add079624bdaa2 is 175, key is test_row_0/A:col10/1733891281504/Put/seqid=0 2024-12-11T04:28:02,811 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/2e26aa8080a04c32bbb9ce9926619d5f as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/2e26aa8080a04c32bbb9ce9926619d5f 2024-12-11T04:28:02,817 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9dbedb23b89b4496aba995340251c393/B of 9dbedb23b89b4496aba995340251c393 into 2e26aa8080a04c32bbb9ce9926619d5f(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:28:02,817 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9dbedb23b89b4496aba995340251c393: 2024-12-11T04:28:02,817 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393., storeName=9dbedb23b89b4496aba995340251c393/B, priority=12, startTime=1733891282760; duration=0sec 2024-12-11T04:28:02,817 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:28:02,817 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9dbedb23b89b4496aba995340251c393:B 2024-12-11T04:28:02,817 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T04:28:02,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742231_1407 (size=4469) 2024-12-11T04:28:02,819 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T04:28:02,819 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): 9dbedb23b89b4496aba995340251c393/C is initiating minor compaction (all files) 2024-12-11T04:28:02,819 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9dbedb23b89b4496aba995340251c393/C in TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 
2024-12-11T04:28:02,819 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/ec80a362ae4147cfb108d1aec3152e4d, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/021c3aedea0a419884dc630093177cae, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/f9e098f42b8a4c44ba2931b332bc10c0, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/1a709d26b2274678bf85c48552adeb7c] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp, totalSize=47.0 K 2024-12-11T04:28:02,819 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting ec80a362ae4147cfb108d1aec3152e4d, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1733891278501 2024-12-11T04:28:02,819 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 021c3aedea0a419884dc630093177cae, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733891278854 2024-12-11T04:28:02,820 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting f9e098f42b8a4c44ba2931b332bc10c0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733891278977 2024-12-11T04:28:02,820 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 1a709d26b2274678bf85c48552adeb7c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1733891281175 2024-12-11T04:28:02,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742232_1408 (size=31005) 2024-12-11T04:28:02,831 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9dbedb23b89b4496aba995340251c393#C#compaction#348 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:02,832 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/a9c41982f63d4648bc00d7c518e8b948 is 50, key is test_row_0/C:col10/1733891281491/Put/seqid=0 2024-12-11T04:28:02,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742233_1409 (size=12241) 2024-12-11T04:28:02,844 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/a9c41982f63d4648bc00d7c518e8b948 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/a9c41982f63d4648bc00d7c518e8b948 2024-12-11T04:28:02,848 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9dbedb23b89b4496aba995340251c393/C of 9dbedb23b89b4496aba995340251c393 into a9c41982f63d4648bc00d7c518e8b948(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:28:02,848 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9dbedb23b89b4496aba995340251c393: 2024-12-11T04:28:02,848 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393., storeName=9dbedb23b89b4496aba995340251c393/C, priority=12, startTime=1733891282761; duration=0sec 2024-12-11T04:28:02,849 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:02,849 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9dbedb23b89b4496aba995340251c393:C 2024-12-11T04:28:03,219 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9dbedb23b89b4496aba995340251c393#A#compaction#347 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:03,219 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/3981e2e0de4e41628932b2bf4df518ba is 175, key is test_row_0/A:col10/1733891281491/Put/seqid=0 2024-12-11T04:28:03,224 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=131, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/033be885eec64adcb8add079624bdaa2 2024-12-11T04:28:03,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/87d6bd9816664ccdb50a52552e6d6307 is 50, key is test_row_0/B:col10/1733891281504/Put/seqid=0 2024-12-11T04:28:03,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742234_1410 (size=31195) 2024-12-11T04:28:03,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742235_1411 (size=12051) 2024-12-11T04:28:03,267 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/87d6bd9816664ccdb50a52552e6d6307 2024-12-11T04:28:03,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/37214230ddac46208fab5c8373bc43d6 is 50, key is test_row_0/C:col10/1733891281504/Put/seqid=0 2024-12-11T04:28:03,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742236_1412 (size=12051) 2024-12-11T04:28:03,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:03,636 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 
as already flushing 2024-12-11T04:28:03,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-11T04:28:03,647 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/3981e2e0de4e41628932b2bf4df518ba as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/3981e2e0de4e41628932b2bf4df518ba 2024-12-11T04:28:03,651 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9dbedb23b89b4496aba995340251c393/A of 9dbedb23b89b4496aba995340251c393 into 3981e2e0de4e41628932b2bf4df518ba(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:28:03,651 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9dbedb23b89b4496aba995340251c393: 2024-12-11T04:28:03,651 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393., storeName=9dbedb23b89b4496aba995340251c393/A, priority=12, startTime=1733891282760; duration=0sec 2024-12-11T04:28:03,651 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:03,651 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9dbedb23b89b4496aba995340251c393:A 2024-12-11T04:28:03,666 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:03,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891343661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:03,668 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:03,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891343661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:03,668 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:03,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891343662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:03,669 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:03,669 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:03,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891343664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:03,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891343662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:03,683 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/37214230ddac46208fab5c8373bc43d6 2024-12-11T04:28:03,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/033be885eec64adcb8add079624bdaa2 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/033be885eec64adcb8add079624bdaa2 2024-12-11T04:28:03,692 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/033be885eec64adcb8add079624bdaa2, entries=150, sequenceid=131, filesize=30.3 K 2024-12-11T04:28:03,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/87d6bd9816664ccdb50a52552e6d6307 as 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/87d6bd9816664ccdb50a52552e6d6307 2024-12-11T04:28:03,696 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/87d6bd9816664ccdb50a52552e6d6307, entries=150, sequenceid=131, filesize=11.8 K 2024-12-11T04:28:03,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/37214230ddac46208fab5c8373bc43d6 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/37214230ddac46208fab5c8373bc43d6 2024-12-11T04:28:03,700 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/37214230ddac46208fab5c8373bc43d6, entries=150, sequenceid=131, filesize=11.8 K 2024-12-11T04:28:03,701 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 9dbedb23b89b4496aba995340251c393 in 935ms, sequenceid=131, compaction requested=false 2024-12-11T04:28:03,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2538): Flush status journal for 9dbedb23b89b4496aba995340251c393: 2024-12-11T04:28:03,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 
2024-12-11T04:28:03,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=121 2024-12-11T04:28:03,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=121 2024-12-11T04:28:03,704 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120 2024-12-11T04:28:03,704 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1610 sec 2024-12-11T04:28:03,705 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees in 2.1630 sec 2024-12-11T04:28:03,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:03,770 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9dbedb23b89b4496aba995340251c393 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-11T04:28:03,770 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=A 2024-12-11T04:28:03,770 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:03,770 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=B 2024-12-11T04:28:03,770 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:03,770 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=C 2024-12-11T04:28:03,771 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:03,777 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211caf4f98c509b4e1da1f2944e88291a90_9dbedb23b89b4496aba995340251c393 is 50, key is test_row_0/A:col10/1733891283769/Put/seqid=0 2024-12-11T04:28:03,782 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:03,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891343775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:03,784 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:03,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891343778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:03,784 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:03,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891343778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:03,784 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:03,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891343778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:03,785 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:03,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891343782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:03,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742237_1413 (size=14794) 2024-12-11T04:28:03,793 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:03,797 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211caf4f98c509b4e1da1f2944e88291a90_9dbedb23b89b4496aba995340251c393 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211caf4f98c509b4e1da1f2944e88291a90_9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:03,797 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/015fea55851141329e6389dca134f4ef, store: [table=TestAcidGuarantees family=A region=9dbedb23b89b4496aba995340251c393] 2024-12-11T04:28:03,798 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/015fea55851141329e6389dca134f4ef is 175, key is test_row_0/A:col10/1733891283769/Put/seqid=0 2024-12-11T04:28:03,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is 
added to blk_1073742238_1414 (size=39749) 2024-12-11T04:28:03,804 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=161, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/015fea55851141329e6389dca134f4ef 2024-12-11T04:28:03,812 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/2eec791d21bf43d99f7320745dc7525a is 50, key is test_row_0/B:col10/1733891283769/Put/seqid=0 2024-12-11T04:28:03,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742239_1415 (size=12151) 2024-12-11T04:28:03,818 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=161 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/2eec791d21bf43d99f7320745dc7525a 2024-12-11T04:28:03,824 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/6faa36ba75684afb9f6bc41ae12d702f is 50, key is test_row_0/C:col10/1733891283769/Put/seqid=0 2024-12-11T04:28:03,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742240_1416 (size=12151) 2024-12-11T04:28:03,888 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:03,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891343884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:03,889 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:03,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891343885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:03,889 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:03,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891343885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:03,890 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:03,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891343886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:03,890 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:03,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891343886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:04,091 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:04,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891344089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:04,093 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:04,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891344090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:04,094 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:04,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891344091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:04,094 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:04,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891344091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:04,094 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:04,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891344092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:04,229 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=161 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/6faa36ba75684afb9f6bc41ae12d702f 2024-12-11T04:28:04,241 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/015fea55851141329e6389dca134f4ef as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/015fea55851141329e6389dca134f4ef 2024-12-11T04:28:04,245 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/015fea55851141329e6389dca134f4ef, entries=200, sequenceid=161, filesize=38.8 K 2024-12-11T04:28:04,245 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/2eec791d21bf43d99f7320745dc7525a as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/2eec791d21bf43d99f7320745dc7525a 2024-12-11T04:28:04,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,249 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/2eec791d21bf43d99f7320745dc7525a, entries=150, sequenceid=161, filesize=11.9 K 2024-12-11T04:28:04,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,249 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/6faa36ba75684afb9f6bc41ae12d702f as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/6faa36ba75684afb9f6bc41ae12d702f 2024-12-11T04:28:04,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,252 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/6faa36ba75684afb9f6bc41ae12d702f, entries=150, sequenceid=161, filesize=11.9 K
2024-12-11T04:28:04,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:04,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:04,253 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 9dbedb23b89b4496aba995340251c393 in 483ms, sequenceid=161, compaction requested=true
2024-12-11T04:28:04,253 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9dbedb23b89b4496aba995340251c393:
2024-12-11T04:28:04,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9dbedb23b89b4496aba995340251c393:A, priority=-2147483648, current under compaction store size is 1
2024-12-11T04:28:04,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-11T04:28:04,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:04,253 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-11T04:28:04,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9dbedb23b89b4496aba995340251c393:B, priority=-2147483648, current under compaction store size is 2
2024-12-11T04:28:04,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-11T04:28:04,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9dbedb23b89b4496aba995340251c393:C, priority=-2147483648, current under compaction store size is 3
2024-12-11T04:28:04,253 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-11T04:28:04,254 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-11T04:28:04,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:04,254 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101949 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-11T04:28:04,254 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36443 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-11T04:28:04,254 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): 9dbedb23b89b4496aba995340251c393/A is initiating minor compaction (all files)
2024-12-11T04:28:04,254 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): 9dbedb23b89b4496aba995340251c393/B is initiating minor compaction (all files)
2024-12-11T04:28:04,254 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9dbedb23b89b4496aba995340251c393/A in TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.
2024-12-11T04:28:04,254 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9dbedb23b89b4496aba995340251c393/B in TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.
2024-12-11T04:28:04,255 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/3981e2e0de4e41628932b2bf4df518ba, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/033be885eec64adcb8add079624bdaa2, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/015fea55851141329e6389dca134f4ef] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp, totalSize=99.6 K
2024-12-11T04:28:04,255 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/2e26aa8080a04c32bbb9ce9926619d5f, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/87d6bd9816664ccdb50a52552e6d6307, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/2eec791d21bf43d99f7320745dc7525a] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp, totalSize=35.6 K
2024-12-11T04:28:04,255 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.
2024-12-11T04:28:04,255 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.
files: [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/3981e2e0de4e41628932b2bf4df518ba, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/033be885eec64adcb8add079624bdaa2, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/015fea55851141329e6389dca134f4ef] 2024-12-11T04:28:04,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,255 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e26aa8080a04c32bbb9ce9926619d5f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1733891281175 2024-12-11T04:28:04,255 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3981e2e0de4e41628932b2bf4df518ba, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1733891281175 2024-12-11T04:28:04,255 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 87d6bd9816664ccdb50a52552e6d6307, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1733891281498 2024-12-11T04:28:04,255 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 033be885eec64adcb8add079624bdaa2, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1733891281498 2024-12-11T04:28:04,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,256 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 2eec791d21bf43d99f7320745dc7525a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1733891283654 2024-12-11T04:28:04,256 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 015fea55851141329e6389dca134f4ef, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1733891283654 2024-12-11T04:28:04,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,261 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=9dbedb23b89b4496aba995340251c393] 2024-12-11T04:28:04,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,263 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,265 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,269 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,269 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9dbedb23b89b4496aba995340251c393#B#compaction#355 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:04,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,270 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/35b36f8249484a28ae535aa9da8fbcb5 is 50, key is test_row_0/B:col10/1733891283769/Put/seqid=0 2024-12-11T04:28:04,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,280 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121167eb907026f64d3e8855af7d1e0063af_9dbedb23b89b4496aba995340251c393 store=[table=TestAcidGuarantees family=A region=9dbedb23b89b4496aba995340251c393] 2024-12-11T04:28:04,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,282 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121167eb907026f64d3e8855af7d1e0063af_9dbedb23b89b4496aba995340251c393, 
store=[table=TestAcidGuarantees family=A region=9dbedb23b89b4496aba995340251c393] 2024-12-11T04:28:04,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,283 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121167eb907026f64d3e8855af7d1e0063af_9dbedb23b89b4496aba995340251c393 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=9dbedb23b89b4496aba995340251c393] 2024-12-11T04:28:04,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... RpcServer.default.FPBQ.Fifo handlers 0/1/2 (port=39071) repeatedly log the same DEBUG entry, storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker, from 2024-12-11T04:28:04,286 through 04:28:04,297 ...]
[... RpcServer.default.FPBQ.Fifo handlers 0/1/2 continue to log the same StoreFileTrackerFactory(122) DEBUG entry between 2024-12-11T04:28:04,298 and 04:28:04,328, interleaved with the following ...]
2024-12-11T04:28:04,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742241_1417 (size=12493)
2024-12-11T04:28:04,305 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/35b36f8249484a28ae535aa9da8fbcb5 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/35b36f8249484a28ae535aa9da8fbcb5
2024-12-11T04:28:04,309 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9dbedb23b89b4496aba995340251c393/B of 9dbedb23b89b4496aba995340251c393 into 35b36f8249484a28ae535aa9da8fbcb5(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-11T04:28:04,309 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9dbedb23b89b4496aba995340251c393:
2024-12-11T04:28:04,309 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393., storeName=9dbedb23b89b4496aba995340251c393/B, priority=13, startTime=1733891284253; duration=0sec
2024-12-11T04:28:04,310 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-11T04:28:04,310 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9dbedb23b89b4496aba995340251c393:B
2024-12-11T04:28:04,310 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-11T04:28:04,311 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36443 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-11T04:28:04,311 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): 9dbedb23b89b4496aba995340251c393/C is initiating minor compaction (all files)
2024-12-11T04:28:04,311 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9dbedb23b89b4496aba995340251c393/C in TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.
2024-12-11T04:28:04,311 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/a9c41982f63d4648bc00d7c518e8b948, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/37214230ddac46208fab5c8373bc43d6, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/6faa36ba75684afb9f6bc41ae12d702f] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp, totalSize=35.6 K
2024-12-11T04:28:04,311 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting a9c41982f63d4648bc00d7c518e8b948, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1733891281175
2024-12-11T04:28:04,312 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 37214230ddac46208fab5c8373bc43d6, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1733891281498
2024-12-11T04:28:04,312 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 6faa36ba75684afb9f6bc41ae12d702f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1733891283654
2024-12-11T04:28:04,319 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9dbedb23b89b4496aba995340251c393#C#compaction#356 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second
2024-12-11T04:28:04,320 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/bd53271219954aaca960b053d019ed3e is 50, key is test_row_0/C:col10/1733891283769/Put/seqid=0
2024-12-11T04:28:04,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742242_1418 (size=4469)
2024-12-11T04:28:04,324 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9dbedb23b89b4496aba995340251c393#A#compaction#354 average throughput is 0.39 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-11T04:28:04,324 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/cfdbfaeec7174a0ba445c9b42bf3d70d is 175, key is test_row_0/A:col10/1733891283769/Put/seqid=0
[... RpcServer.default.FPBQ.Fifo handlers 0/1/2 continue to log the same StoreFileTrackerFactory(122) DEBUG entry between 2024-12-11T04:28:04,328 and 04:28:04,354 ...]
2024-12-11T04:28:04,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742243_1419 (size=12493)
[... RpcServer.default.FPBQ.Fifo handlers 0/1/2 continue to log the same StoreFileTrackerFactory(122) DEBUG entry between 2024-12-11T04:28:04,354 and 04:28:04,358 ...]
2024-12-11T04:28:04,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,359 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/bd53271219954aaca960b053d019ed3e as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/bd53271219954aaca960b053d019ed3e 2024-12-11T04:28:04,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-11T04:28:04,363 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9dbedb23b89b4496aba995340251c393/C of 9dbedb23b89b4496aba995340251c393 into bd53271219954aaca960b053d019ed3e(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:28:04,363 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9dbedb23b89b4496aba995340251c393: 2024-12-11T04:28:04,363 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393., storeName=9dbedb23b89b4496aba995340251c393/C, priority=13, startTime=1733891284253; duration=0sec 2024-12-11T04:28:04,363 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:04,363 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9dbedb23b89b4496aba995340251c393:C 2024-12-11T04:28:04,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-11T04:28:04,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-11T04:28:04,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-11T04:28:04,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-11T04:28:04,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-11T04:28:04,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-11T04:28:04,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-11T04:28:04,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-11T04:28:04,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-11T04:28:04,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742244_1420 (size=31447) 2024-12-11T04:28:04,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 
{}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:04,408 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9dbedb23b89b4496aba995340251c393 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-11T04:28:04,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,410 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=A 2024-12-11T04:28:04,410 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:04,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,410 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=B 2024-12-11T04:28:04,410 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:04,410 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=C 2024-12-11T04:28:04,410 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:04,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,422 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211ef31453777ed4bac826237163f958859_9dbedb23b89b4496aba995340251c393 is 50, key is test_row_0/A:col10/1733891284405/Put/seqid=0 2024-12-11T04:28:04,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742245_1421 (size=17284) 2024-12-11T04:28:04,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,428 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,432 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,433 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211ef31453777ed4bac826237163f958859_9dbedb23b89b4496aba995340251c393 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211ef31453777ed4bac826237163f958859_9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:04,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,435 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/80093efab46841d4a6ae32c451059f5a, store: [table=TestAcidGuarantees family=A region=9dbedb23b89b4496aba995340251c393] 2024-12-11T04:28:04,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,436 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/80093efab46841d4a6ae32c451059f5a is 175, key is 
test_row_0/A:col10/1733891284405/Put/seqid=0 2024-12-11T04:28:04,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:04,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742246_1422 (size=48389) 2024-12-11T04:28:04,468 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:04,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891344462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:04,468 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:04,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891344463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:04,470 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:04,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891344465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:04,474 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:04,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891344467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:04,475 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:04,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891344468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:04,573 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:04,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891344569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:04,574 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:04,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891344569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:04,574 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:04,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891344571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:04,578 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:04,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891344575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:04,579 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:04,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891344575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:04,778 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:04,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891344775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:04,779 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:04,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891344776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:04,779 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:04,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891344776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:04,784 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:04,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891344780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:04,784 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:04,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891344780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:04,798 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/cfdbfaeec7174a0ba445c9b42bf3d70d as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/cfdbfaeec7174a0ba445c9b42bf3d70d 2024-12-11T04:28:04,802 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9dbedb23b89b4496aba995340251c393/A of 9dbedb23b89b4496aba995340251c393 into cfdbfaeec7174a0ba445c9b42bf3d70d(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T04:28:04,802 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9dbedb23b89b4496aba995340251c393: 2024-12-11T04:28:04,802 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393., storeName=9dbedb23b89b4496aba995340251c393/A, priority=13, startTime=1733891284253; duration=0sec 2024-12-11T04:28:04,802 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:04,802 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9dbedb23b89b4496aba995340251c393:A 2024-12-11T04:28:04,862 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=175, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/80093efab46841d4a6ae32c451059f5a 2024-12-11T04:28:04,869 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/623d4dad09c541938944c9fe83eae190 is 50, key is test_row_0/B:col10/1733891284405/Put/seqid=0 2024-12-11T04:28:04,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742247_1423 (size=12151) 2024-12-11T04:28:05,081 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:05,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891345080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:05,085 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:05,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891345081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:05,086 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:05,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891345081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:05,087 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:05,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891345085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:05,088 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:05,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891345085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:05,273 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=175 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/623d4dad09c541938944c9fe83eae190 2024-12-11T04:28:05,279 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/94399002f5a1455f87278a3641c6e045 is 50, key is test_row_0/C:col10/1733891284405/Put/seqid=0 2024-12-11T04:28:05,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742248_1424 (size=12151) 2024-12-11T04:28:05,589 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:05,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891345586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:05,592 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:05,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891345588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:05,593 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:05,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891345589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:05,594 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:05,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891345590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:05,594 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:05,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891345591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:05,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-11T04:28:05,647 INFO [Thread-1693 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 120 completed 2024-12-11T04:28:05,648 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:28:05,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=122, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees 2024-12-11T04:28:05,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-11T04:28:05,649 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=122, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:28:05,653 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=122, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:28:05,653 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:28:05,683 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at 
sequenceid=175 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/94399002f5a1455f87278a3641c6e045 2024-12-11T04:28:05,687 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/80093efab46841d4a6ae32c451059f5a as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/80093efab46841d4a6ae32c451059f5a 2024-12-11T04:28:05,690 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/80093efab46841d4a6ae32c451059f5a, entries=250, sequenceid=175, filesize=47.3 K 2024-12-11T04:28:05,691 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/623d4dad09c541938944c9fe83eae190 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/623d4dad09c541938944c9fe83eae190 2024-12-11T04:28:05,694 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/623d4dad09c541938944c9fe83eae190, entries=150, sequenceid=175, filesize=11.9 K 2024-12-11T04:28:05,695 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/94399002f5a1455f87278a3641c6e045 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/94399002f5a1455f87278a3641c6e045 2024-12-11T04:28:05,698 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/94399002f5a1455f87278a3641c6e045, entries=150, sequenceid=175, filesize=11.9 K 2024-12-11T04:28:05,699 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 9dbedb23b89b4496aba995340251c393 in 1290ms, sequenceid=175, compaction requested=false 2024-12-11T04:28:05,699 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9dbedb23b89b4496aba995340251c393: 2024-12-11T04:28:05,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-11T04:28:05,804 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:05,804 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-12-11T04:28:05,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:05,805 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2837): Flushing 9dbedb23b89b4496aba995340251c393 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-11T04:28:05,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=A 2024-12-11T04:28:05,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:05,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=B 2024-12-11T04:28:05,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:05,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=C 2024-12-11T04:28:05,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:05,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412110e1543bd40a5461ebace53e01abd5283_9dbedb23b89b4496aba995340251c393 is 50, key is test_row_0/A:col10/1733891284466/Put/seqid=0 2024-12-11T04:28:05,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742249_1425 (size=12304) 2024-12-11T04:28:05,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-11T04:28:06,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:06,220 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412110e1543bd40a5461ebace53e01abd5283_9dbedb23b89b4496aba995340251c393 to 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412110e1543bd40a5461ebace53e01abd5283_9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:06,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/35e27520903743329249ada43d007b5e, store: [table=TestAcidGuarantees family=A region=9dbedb23b89b4496aba995340251c393] 2024-12-11T04:28:06,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/35e27520903743329249ada43d007b5e is 175, key is test_row_0/A:col10/1733891284466/Put/seqid=0 2024-12-11T04:28:06,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742250_1426 (size=31105) 2024-12-11T04:28:06,225 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=200, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/35e27520903743329249ada43d007b5e 2024-12-11T04:28:06,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/f7582870620d49859863e99508ea2e85 is 50, key is test_row_0/B:col10/1733891284466/Put/seqid=0 2024-12-11T04:28:06,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-11T04:28:06,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742251_1427 (size=12151) 2024-12-11T04:28:06,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:06,597 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. as already flushing 2024-12-11T04:28:06,610 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:06,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891346606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:06,614 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:06,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891346609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:06,616 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:06,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891346609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:06,616 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:06,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891346610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:06,616 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:06,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891346610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:06,666 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=200 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/f7582870620d49859863e99508ea2e85 2024-12-11T04:28:06,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/e516fa16935142f5bf3db19a8f04066e is 50, key is test_row_0/C:col10/1733891284466/Put/seqid=0 2024-12-11T04:28:06,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742252_1428 (size=12151) 2024-12-11T04:28:06,712 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:06,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891346711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:06,717 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:06,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891346715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:06,719 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:06,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891346717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:06,719 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:06,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891346717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:06,720 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:06,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891346717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:06,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-11T04:28:06,919 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:06,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891346914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:06,924 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:06,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891346918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:06,927 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:06,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891346920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:06,927 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:06,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891346921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:06,927 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:06,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891346921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:07,076 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=200 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/e516fa16935142f5bf3db19a8f04066e 2024-12-11T04:28:07,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/35e27520903743329249ada43d007b5e as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/35e27520903743329249ada43d007b5e 2024-12-11T04:28:07,084 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/35e27520903743329249ada43d007b5e, entries=150, sequenceid=200, filesize=30.4 K 2024-12-11T04:28:07,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/f7582870620d49859863e99508ea2e85 as 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/f7582870620d49859863e99508ea2e85 2024-12-11T04:28:07,089 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/f7582870620d49859863e99508ea2e85, entries=150, sequenceid=200, filesize=11.9 K 2024-12-11T04:28:07,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/e516fa16935142f5bf3db19a8f04066e as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/e516fa16935142f5bf3db19a8f04066e 2024-12-11T04:28:07,094 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/e516fa16935142f5bf3db19a8f04066e, entries=150, sequenceid=200, filesize=11.9 K 2024-12-11T04:28:07,095 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=73.80 KB/75570 for 9dbedb23b89b4496aba995340251c393 in 1289ms, sequenceid=200, compaction requested=true 2024-12-11T04:28:07,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2538): Flush status journal for 9dbedb23b89b4496aba995340251c393: 2024-12-11T04:28:07,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 
2024-12-11T04:28:07,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123 2024-12-11T04:28:07,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=123 2024-12-11T04:28:07,097 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-12-11T04:28:07,097 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4430 sec 2024-12-11T04:28:07,098 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees in 1.4490 sec 2024-12-11T04:28:07,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:07,226 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9dbedb23b89b4496aba995340251c393 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-11T04:28:07,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=A 2024-12-11T04:28:07,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:07,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=B 2024-12-11T04:28:07,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:07,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=C 2024-12-11T04:28:07,226 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:07,232 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412118c9aa60b13724fbc9a55cdca9294eb46_9dbedb23b89b4496aba995340251c393 is 50, key is test_row_0/A:col10/1733891286609/Put/seqid=0 2024-12-11T04:28:07,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742253_1429 (size=14794) 2024-12-11T04:28:07,236 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:07,240 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412118c9aa60b13724fbc9a55cdca9294eb46_9dbedb23b89b4496aba995340251c393 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412118c9aa60b13724fbc9a55cdca9294eb46_9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:07,241 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/35323d74dc7544ee8b50158e27dce776, store: [table=TestAcidGuarantees family=A region=9dbedb23b89b4496aba995340251c393] 2024-12-11T04:28:07,242 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/35323d74dc7544ee8b50158e27dce776 is 175, key is test_row_0/A:col10/1733891286609/Put/seqid=0 2024-12-11T04:28:07,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742254_1430 (size=39749) 2024-12-11T04:28:07,261 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:07,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891347254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:07,262 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:07,262 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:07,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891347254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:07,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891347255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:07,266 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:07,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891347261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:07,266 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:07,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891347262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:07,366 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:07,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891347363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:07,366 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:07,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891347363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:07,366 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:07,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891347363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:07,373 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:07,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891347367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:07,373 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:07,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891347367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:07,572 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:07,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891347567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:07,572 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:07,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891347567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:07,572 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:07,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891347567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:07,581 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:07,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891347574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:07,581 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:07,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891347575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:07,647 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=215, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/35323d74dc7544ee8b50158e27dce776 2024-12-11T04:28:07,654 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/8487f9d6068640dfa0a7c7ef61fb2526 is 50, key is test_row_0/B:col10/1733891286609/Put/seqid=0 2024-12-11T04:28:07,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742255_1431 (size=12151) 2024-12-11T04:28:07,660 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/8487f9d6068640dfa0a7c7ef61fb2526 2024-12-11T04:28:07,670 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/b63cb9d647524ae7a8f586494e1a9909 is 50, key is test_row_0/C:col10/1733891286609/Put/seqid=0 2024-12-11T04:28:07,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742256_1432 (size=12151) 2024-12-11T04:28:07,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-11T04:28:07,753 INFO [Thread-1693 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 122 completed 2024-12-11T04:28:07,754 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:28:07,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees 2024-12-11T04:28:07,755 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-11T04:28:07,755 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:28:07,756 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=124, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:28:07,756 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:28:07,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-11T04:28:07,876 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:07,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891347874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:07,876 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:07,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891347875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:07,877 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:07,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891347875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:07,887 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:07,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891347883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:07,887 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:07,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891347884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:07,907 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:07,908 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-11T04:28:07,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:07,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. as already flushing 2024-12-11T04:28:07,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:07,908 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
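
The pid=124/125 entries above trace the master-driven flush path: the client's flush request is stored as a FlushTableProcedure whose FlushRegionProcedure subprocedure is dispatched to the region server, and because the region is still busy with the MemStoreFlusher flush it answers "NOT flushing ... as already flushing", the callable fails, and the master dispatches it again (visible below at 04:28:08). A minimal client-side sketch of the call that starts this sequence follows; the connection setup is assumed, and only the table name is taken from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Minimal sketch of an administrative table flush, assuming a reachable cluster.
public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Returns once the FlushTableProcedure completes; the "Checking to see if
      // procedure is done" lines in this log correspond to the client-side wait
      // polling the master for the procedure result.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
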
2024-12-11T04:28:07,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:07,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:08,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-11T04:28:08,060 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:08,061 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-11T04:28:08,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:08,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. as already flushing 2024-12-11T04:28:08,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:08,061 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:08,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:08,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:28:08,078 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/b63cb9d647524ae7a8f586494e1a9909 2024-12-11T04:28:08,082 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/35323d74dc7544ee8b50158e27dce776 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/35323d74dc7544ee8b50158e27dce776 2024-12-11T04:28:08,087 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/35323d74dc7544ee8b50158e27dce776, entries=200, sequenceid=215, filesize=38.8 K 2024-12-11T04:28:08,088 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/8487f9d6068640dfa0a7c7ef61fb2526 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/8487f9d6068640dfa0a7c7ef61fb2526 2024-12-11T04:28:08,091 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/8487f9d6068640dfa0a7c7ef61fb2526, entries=150, sequenceid=215, filesize=11.9 K 2024-12-11T04:28:08,092 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/b63cb9d647524ae7a8f586494e1a9909 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/b63cb9d647524ae7a8f586494e1a9909 2024-12-11T04:28:08,095 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/b63cb9d647524ae7a8f586494e1a9909, entries=150, sequenceid=215, filesize=11.9 K 2024-12-11T04:28:08,096 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 9dbedb23b89b4496aba995340251c393 in 869ms, sequenceid=215, compaction requested=true 2024-12-11T04:28:08,096 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9dbedb23b89b4496aba995340251c393: 2024-12-11T04:28:08,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9dbedb23b89b4496aba995340251c393:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:28:08,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:08,096 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T04:28:08,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9dbedb23b89b4496aba995340251c393:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:28:08,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:08,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9dbedb23b89b4496aba995340251c393:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:28:08,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:28:08,096 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T04:28:08,097 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 150690 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T04:28:08,097 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): 9dbedb23b89b4496aba995340251c393/A is initiating minor compaction (all files) 2024-12-11T04:28:08,097 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T04:28:08,097 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9dbedb23b89b4496aba995340251c393/A in TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:08,097 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): 9dbedb23b89b4496aba995340251c393/B is initiating minor compaction (all files) 2024-12-11T04:28:08,097 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9dbedb23b89b4496aba995340251c393/B in TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 
2024-12-11T04:28:08,098 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/cfdbfaeec7174a0ba445c9b42bf3d70d, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/80093efab46841d4a6ae32c451059f5a, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/35e27520903743329249ada43d007b5e, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/35323d74dc7544ee8b50158e27dce776] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp, totalSize=147.2 K 2024-12-11T04:28:08,098 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:08,098 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/35b36f8249484a28ae535aa9da8fbcb5, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/623d4dad09c541938944c9fe83eae190, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/f7582870620d49859863e99508ea2e85, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/8487f9d6068640dfa0a7c7ef61fb2526] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp, totalSize=47.8 K 2024-12-11T04:28:08,098 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 
files: [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/cfdbfaeec7174a0ba445c9b42bf3d70d, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/80093efab46841d4a6ae32c451059f5a, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/35e27520903743329249ada43d007b5e, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/35323d74dc7544ee8b50158e27dce776] 2024-12-11T04:28:08,098 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 35b36f8249484a28ae535aa9da8fbcb5, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1733891283654 2024-12-11T04:28:08,098 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 623d4dad09c541938944c9fe83eae190, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733891284405 2024-12-11T04:28:08,099 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting f7582870620d49859863e99508ea2e85, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1733891284461 2024-12-11T04:28:08,099 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting cfdbfaeec7174a0ba445c9b42bf3d70d, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1733891283654 2024-12-11T04:28:08,099 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 8487f9d6068640dfa0a7c7ef61fb2526, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1733891286604 2024-12-11T04:28:08,099 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 80093efab46841d4a6ae32c451059f5a, keycount=250, bloomtype=ROW, size=47.3 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733891283775 2024-12-11T04:28:08,100 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 35e27520903743329249ada43d007b5e, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1733891284461 2024-12-11T04:28:08,100 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 35323d74dc7544ee8b50158e27dce776, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1733891286604 2024-12-11T04:28:08,107 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=9dbedb23b89b4496aba995340251c393] 2024-12-11T04:28:08,108 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9dbedb23b89b4496aba995340251c393#B#compaction#366 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:08,108 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121127379c9aaeb5443a9cb18cb08ea6c679_9dbedb23b89b4496aba995340251c393 store=[table=TestAcidGuarantees family=A region=9dbedb23b89b4496aba995340251c393] 2024-12-11T04:28:08,108 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/355f0cabb13b4a4c8c8a5dff7f8462d8 is 50, key is test_row_0/B:col10/1733891286609/Put/seqid=0 2024-12-11T04:28:08,110 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121127379c9aaeb5443a9cb18cb08ea6c679_9dbedb23b89b4496aba995340251c393, store=[table=TestAcidGuarantees family=A region=9dbedb23b89b4496aba995340251c393] 2024-12-11T04:28:08,111 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121127379c9aaeb5443a9cb18cb08ea6c679_9dbedb23b89b4496aba995340251c393 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=9dbedb23b89b4496aba995340251c393] 2024-12-11T04:28:08,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742257_1433 (size=12629) 2024-12-11T04:28:08,119 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/355f0cabb13b4a4c8c8a5dff7f8462d8 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/355f0cabb13b4a4c8c8a5dff7f8462d8 2024-12-11T04:28:08,126 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9dbedb23b89b4496aba995340251c393/B of 9dbedb23b89b4496aba995340251c393 into 355f0cabb13b4a4c8c8a5dff7f8462d8(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T04:28:08,126 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9dbedb23b89b4496aba995340251c393: 2024-12-11T04:28:08,126 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393., storeName=9dbedb23b89b4496aba995340251c393/B, priority=12, startTime=1733891288096; duration=0sec 2024-12-11T04:28:08,126 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:28:08,126 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9dbedb23b89b4496aba995340251c393:B 2024-12-11T04:28:08,126 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T04:28:08,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742258_1434 (size=4469) 2024-12-11T04:28:08,128 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T04:28:08,128 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): 9dbedb23b89b4496aba995340251c393/C is initiating minor compaction (all files) 2024-12-11T04:28:08,128 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9dbedb23b89b4496aba995340251c393/C in TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:08,129 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9dbedb23b89b4496aba995340251c393#A#compaction#367 average throughput is 1.11 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:08,129 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/5981468b5d7a4b0aa4a008068adfe2ba is 175, key is test_row_0/A:col10/1733891286609/Put/seqid=0 2024-12-11T04:28:08,130 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/bd53271219954aaca960b053d019ed3e, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/94399002f5a1455f87278a3641c6e045, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/e516fa16935142f5bf3db19a8f04066e, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/b63cb9d647524ae7a8f586494e1a9909] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp, totalSize=47.8 K 2024-12-11T04:28:08,131 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting bd53271219954aaca960b053d019ed3e, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1733891283654 2024-12-11T04:28:08,132 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 94399002f5a1455f87278a3641c6e045, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733891284405 2024-12-11T04:28:08,133 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting e516fa16935142f5bf3db19a8f04066e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1733891284461 2024-12-11T04:28:08,133 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting b63cb9d647524ae7a8f586494e1a9909, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1733891286604 2024-12-11T04:28:08,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742259_1435 (size=31583) 2024-12-11T04:28:08,150 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9dbedb23b89b4496aba995340251c393#C#compaction#368 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:08,151 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/8038bb22b20f4744a2d5ab7b43ba97d9 is 50, key is test_row_0/C:col10/1733891286609/Put/seqid=0 2024-12-11T04:28:08,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742260_1436 (size=12629) 2024-12-11T04:28:08,213 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:08,213 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-11T04:28:08,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:08,214 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2837): Flushing 9dbedb23b89b4496aba995340251c393 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-11T04:28:08,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=A 2024-12-11T04:28:08,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:08,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=B 2024-12-11T04:28:08,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:08,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=C 2024-12-11T04:28:08,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:08,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121181b71790a5e54e3cb2016ed467944621_9dbedb23b89b4496aba995340251c393 is 50, key is test_row_0/A:col10/1733891287251/Put/seqid=0 2024-12-11T04:28:08,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742261_1437 (size=12304) 2024-12-11T04:28:08,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=124 2024-12-11T04:28:08,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:08,380 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. as already flushing 2024-12-11T04:28:08,405 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:08,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891348397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:08,405 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:08,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891348397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:08,406 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:08,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891348398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:08,407 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:08,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891348399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:08,407 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:08,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891348402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:08,511 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:08,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891348506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:08,511 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:08,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891348506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:08,512 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:08,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891348507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:08,512 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:08,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891348508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:08,512 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:08,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891348508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:08,550 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/5981468b5d7a4b0aa4a008068adfe2ba as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/5981468b5d7a4b0aa4a008068adfe2ba 2024-12-11T04:28:08,560 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9dbedb23b89b4496aba995340251c393/A of 9dbedb23b89b4496aba995340251c393 into 5981468b5d7a4b0aa4a008068adfe2ba(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:28:08,560 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9dbedb23b89b4496aba995340251c393: 2024-12-11T04:28:08,560 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393., storeName=9dbedb23b89b4496aba995340251c393/A, priority=12, startTime=1733891288096; duration=0sec 2024-12-11T04:28:08,560 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:08,560 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9dbedb23b89b4496aba995340251c393:A 2024-12-11T04:28:08,561 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/8038bb22b20f4744a2d5ab7b43ba97d9 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/8038bb22b20f4744a2d5ab7b43ba97d9 2024-12-11T04:28:08,565 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9dbedb23b89b4496aba995340251c393/C of 9dbedb23b89b4496aba995340251c393 into 8038bb22b20f4744a2d5ab7b43ba97d9(size=12.3 K), total size for store is 12.3 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:28:08,565 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9dbedb23b89b4496aba995340251c393: 2024-12-11T04:28:08,565 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393., storeName=9dbedb23b89b4496aba995340251c393/C, priority=12, startTime=1733891288096; duration=0sec 2024-12-11T04:28:08,565 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:08,565 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9dbedb23b89b4496aba995340251c393:C 2024-12-11T04:28:08,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:08,628 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121181b71790a5e54e3cb2016ed467944621_9dbedb23b89b4496aba995340251c393 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121181b71790a5e54e3cb2016ed467944621_9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:08,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/35ef2666b7334579ad764dacb6e345f3, store: [table=TestAcidGuarantees family=A region=9dbedb23b89b4496aba995340251c393] 2024-12-11T04:28:08,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/35ef2666b7334579ad764dacb6e345f3 is 175, key is test_row_0/A:col10/1733891287251/Put/seqid=0 2024-12-11T04:28:08,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742262_1438 (size=31105) 2024-12-11T04:28:08,634 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=238, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/35ef2666b7334579ad764dacb6e345f3 2024-12-11T04:28:08,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/351d6c558162439389e2e4f28f54b61d is 50, key is test_row_0/B:col10/1733891287251/Put/seqid=0 2024-12-11T04:28:08,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742263_1439 (size=12151) 2024-12-11T04:28:08,715 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:08,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891348713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:08,715 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:08,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891348713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:08,717 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:08,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891348714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:08,717 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:08,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891348714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:08,717 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:08,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891348714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:08,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-11T04:28:09,019 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:09,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891349016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:09,023 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:09,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891349018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:09,023 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:09,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891349019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:09,024 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:09,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891349020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:09,024 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:09,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891349020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:09,044 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/351d6c558162439389e2e4f28f54b61d 2024-12-11T04:28:09,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/c6e1fdfc27164653a88e30a11ec2e6d6 is 50, key is test_row_0/C:col10/1733891287251/Put/seqid=0 2024-12-11T04:28:09,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742264_1440 (size=12151) 2024-12-11T04:28:09,055 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/c6e1fdfc27164653a88e30a11ec2e6d6 2024-12-11T04:28:09,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/35ef2666b7334579ad764dacb6e345f3 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/35ef2666b7334579ad764dacb6e345f3 2024-12-11T04:28:09,062 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/35ef2666b7334579ad764dacb6e345f3, entries=150, sequenceid=238, filesize=30.4 K 2024-12-11T04:28:09,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/351d6c558162439389e2e4f28f54b61d as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/351d6c558162439389e2e4f28f54b61d 2024-12-11T04:28:09,066 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/351d6c558162439389e2e4f28f54b61d, entries=150, sequenceid=238, filesize=11.9 K 2024-12-11T04:28:09,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/c6e1fdfc27164653a88e30a11ec2e6d6 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/c6e1fdfc27164653a88e30a11ec2e6d6 2024-12-11T04:28:09,070 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/c6e1fdfc27164653a88e30a11ec2e6d6, entries=150, sequenceid=238, filesize=11.9 K 2024-12-11T04:28:09,071 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 9dbedb23b89b4496aba995340251c393 in 858ms, sequenceid=238, compaction requested=false 2024-12-11T04:28:09,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2538): Flush status journal for 9dbedb23b89b4496aba995340251c393: 2024-12-11T04:28:09,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 
2024-12-11T04:28:09,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=125 2024-12-11T04:28:09,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=125 2024-12-11T04:28:09,074 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-12-11T04:28:09,074 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3160 sec 2024-12-11T04:28:09,076 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees in 1.3210 sec 2024-12-11T04:28:09,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:09,526 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9dbedb23b89b4496aba995340251c393 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-11T04:28:09,526 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=A 2024-12-11T04:28:09,526 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:09,526 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=B 2024-12-11T04:28:09,526 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:09,526 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=C 2024-12-11T04:28:09,526 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:09,534 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211e29baae971714fbeb29ad2a81dd07d84_9dbedb23b89b4496aba995340251c393 is 50, key is test_row_0/A:col10/1733891288401/Put/seqid=0 2024-12-11T04:28:09,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742265_1441 (size=12304) 2024-12-11T04:28:09,539 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:09,542 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211e29baae971714fbeb29ad2a81dd07d84_9dbedb23b89b4496aba995340251c393 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211e29baae971714fbeb29ad2a81dd07d84_9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:09,542 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/641d9d923162419aaf6f53a61433b84f, store: [table=TestAcidGuarantees family=A region=9dbedb23b89b4496aba995340251c393] 2024-12-11T04:28:09,543 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/641d9d923162419aaf6f53a61433b84f is 175, key is test_row_0/A:col10/1733891288401/Put/seqid=0 2024-12-11T04:28:09,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742266_1442 (size=31105) 2024-12-11T04:28:09,555 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:09,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891349547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:09,559 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:09,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891349552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:09,559 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:09,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891349552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:09,559 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:09,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891349553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:09,560 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:09,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891349555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:09,660 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:09,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891349656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:09,664 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:09,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891349660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:09,665 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:09,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891349661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:09,666 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:09,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891349662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:09,666 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:09,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891349664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:09,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-11T04:28:09,859 INFO [Thread-1693 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 124 completed 2024-12-11T04:28:09,860 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:28:09,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees 2024-12-11T04:28:09,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-11T04:28:09,861 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:28:09,863 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:28:09,863 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:28:09,867 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:09,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891349862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:09,868 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:09,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891349865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:09,869 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:09,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891349866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:09,869 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:09,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891349867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:09,869 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:09,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891349867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:09,947 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=255, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/641d9d923162419aaf6f53a61433b84f 2024-12-11T04:28:09,954 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/2125abb4d8174438b132c627622330ae is 50, key is test_row_0/B:col10/1733891288401/Put/seqid=0 2024-12-11T04:28:09,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742267_1443 (size=12151) 2024-12-11T04:28:09,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-11T04:28:10,015 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:10,015 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-11T04:28:10,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 
2024-12-11T04:28:10,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. as already flushing 2024-12-11T04:28:10,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:10,016 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:10,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:28:10,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:10,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-11T04:28:10,168 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:10,168 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-11T04:28:10,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 
2024-12-11T04:28:10,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. as already flushing 2024-12-11T04:28:10,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:10,169 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:10,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:28:10,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:10,172 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:10,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891350168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:10,172 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:10,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891350170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:10,173 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:10,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891350171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:10,173 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:10,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891350171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:10,173 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:10,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891350171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:10,321 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:10,321 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-11T04:28:10,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:10,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. as already flushing 2024-12-11T04:28:10,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:10,321 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:10,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:10,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:10,362 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/2125abb4d8174438b132c627622330ae 2024-12-11T04:28:10,370 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/d9d8a2152d2149eca6aa222ee48ecc5d is 50, key is test_row_0/C:col10/1733891288401/Put/seqid=0 2024-12-11T04:28:10,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742268_1444 (size=12151) 2024-12-11T04:28:10,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-11T04:28:10,474 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:10,474 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-11T04:28:10,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:10,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 
as already flushing 2024-12-11T04:28:10,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:10,475 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:10,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:10,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:10,626 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:10,627 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-11T04:28:10,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:10,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. as already flushing 2024-12-11T04:28:10,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:10,627 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:10,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:10,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:10,676 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:10,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891350674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:10,676 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:10,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891350674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:10,677 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:10,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891350675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:10,679 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:10,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891350677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:10,680 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:10,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891350678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:10,774 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/d9d8a2152d2149eca6aa222ee48ecc5d 2024-12-11T04:28:10,778 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/641d9d923162419aaf6f53a61433b84f as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/641d9d923162419aaf6f53a61433b84f 2024-12-11T04:28:10,780 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:10,780 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-11T04:28:10,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:10,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 
as already flushing 2024-12-11T04:28:10,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:10,780 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:10,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:10,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:10,782 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/641d9d923162419aaf6f53a61433b84f, entries=150, sequenceid=255, filesize=30.4 K 2024-12-11T04:28:10,784 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/2125abb4d8174438b132c627622330ae as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/2125abb4d8174438b132c627622330ae 2024-12-11T04:28:10,787 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/2125abb4d8174438b132c627622330ae, entries=150, sequenceid=255, filesize=11.9 K 2024-12-11T04:28:10,788 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/d9d8a2152d2149eca6aa222ee48ecc5d as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/d9d8a2152d2149eca6aa222ee48ecc5d 2024-12-11T04:28:10,791 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/d9d8a2152d2149eca6aa222ee48ecc5d, entries=150, sequenceid=255, filesize=11.9 K 2024-12-11T04:28:10,792 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 9dbedb23b89b4496aba995340251c393 in 1265ms, sequenceid=255, compaction requested=true 2024-12-11T04:28:10,792 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9dbedb23b89b4496aba995340251c393: 2024-12-11T04:28:10,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9dbedb23b89b4496aba995340251c393:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:28:10,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:10,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9dbedb23b89b4496aba995340251c393:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:28:10,792 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:28:10,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:10,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9dbedb23b89b4496aba995340251c393:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:28:10,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:28:10,792 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:28:10,793 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93793 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:28:10,793 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:28:10,793 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): 9dbedb23b89b4496aba995340251c393/A is initiating minor compaction (all files) 2024-12-11T04:28:10,793 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): 9dbedb23b89b4496aba995340251c393/B is initiating minor compaction (all files) 2024-12-11T04:28:10,793 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9dbedb23b89b4496aba995340251c393/A in TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 
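The RegionTooBusyException warnings ("Over memstore limit=512.0 K") in the preceding entries are memstore back-pressure: HRegion.checkResources rejects mutations while the region's memstore is above its blocking size, and the rejections stop once the flush that completes above ("Finished flush of dataSize ~80.51 KB ... in 1265ms") drains it. The blocking size is the configured per-region flush size times a block multiplier; the 512 K figure here presumably comes from this test shrinking the flush size. The sketch below only illustrates the standard configuration keys involved; the class name is hypothetical and the values shown are the stock defaults, not the ones used by this test run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Per-region memstore size that triggers a flush (default 128 MB).
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    // Multiplier applied to the flush size; once the memstore grows past
    // flushSize * multiplier, writes are rejected with
    // RegionTooBusyException until the flush catches up (default 4).
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingSize = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Writes block once a region's memstore exceeds "
        + blockingSize + " bytes");
  }
}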
2024-12-11T04:28:10,793 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9dbedb23b89b4496aba995340251c393/B in TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:10,793 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/5981468b5d7a4b0aa4a008068adfe2ba, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/35ef2666b7334579ad764dacb6e345f3, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/641d9d923162419aaf6f53a61433b84f] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp, totalSize=91.6 K 2024-12-11T04:28:10,793 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/355f0cabb13b4a4c8c8a5dff7f8462d8, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/351d6c558162439389e2e4f28f54b61d, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/2125abb4d8174438b132c627622330ae] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp, totalSize=36.1 K 2024-12-11T04:28:10,793 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:10,793 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 
files: [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/5981468b5d7a4b0aa4a008068adfe2ba, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/35ef2666b7334579ad764dacb6e345f3, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/641d9d923162419aaf6f53a61433b84f] 2024-12-11T04:28:10,793 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 355f0cabb13b4a4c8c8a5dff7f8462d8, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1733891286604 2024-12-11T04:28:10,793 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5981468b5d7a4b0aa4a008068adfe2ba, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1733891286604 2024-12-11T04:28:10,794 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 351d6c558162439389e2e4f28f54b61d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1733891287251 2024-12-11T04:28:10,794 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 35ef2666b7334579ad764dacb6e345f3, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1733891287251 2024-12-11T04:28:10,794 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 2125abb4d8174438b132c627622330ae, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733891288391 2024-12-11T04:28:10,794 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 641d9d923162419aaf6f53a61433b84f, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733891288391 2024-12-11T04:28:10,800 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=9dbedb23b89b4496aba995340251c393] 2024-12-11T04:28:10,802 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9dbedb23b89b4496aba995340251c393#B#compaction#375 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:10,802 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121139f91665f4114c3796a4fd5cdc1e3090_9dbedb23b89b4496aba995340251c393 store=[table=TestAcidGuarantees family=A region=9dbedb23b89b4496aba995340251c393] 2024-12-11T04:28:10,803 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/82239fade5d943fdbc9a8795286a63b8 is 50, key is test_row_0/B:col10/1733891288401/Put/seqid=0 2024-12-11T04:28:10,804 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121139f91665f4114c3796a4fd5cdc1e3090_9dbedb23b89b4496aba995340251c393, store=[table=TestAcidGuarantees family=A region=9dbedb23b89b4496aba995340251c393] 2024-12-11T04:28:10,804 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121139f91665f4114c3796a4fd5cdc1e3090_9dbedb23b89b4496aba995340251c393 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=9dbedb23b89b4496aba995340251c393] 2024-12-11T04:28:10,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742269_1445 (size=12731) 2024-12-11T04:28:10,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742270_1446 (size=4469) 2024-12-11T04:28:10,813 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9dbedb23b89b4496aba995340251c393#A#compaction#376 average throughput is 1.88 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:10,814 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/bbe6aa2604a74859a0a2a6756fa8281a is 175, key is test_row_0/A:col10/1733891288401/Put/seqid=0 2024-12-11T04:28:10,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742271_1447 (size=31685) 2024-12-11T04:28:10,822 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/bbe6aa2604a74859a0a2a6756fa8281a as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/bbe6aa2604a74859a0a2a6756fa8281a 2024-12-11T04:28:10,826 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9dbedb23b89b4496aba995340251c393/A of 9dbedb23b89b4496aba995340251c393 into bbe6aa2604a74859a0a2a6756fa8281a(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:28:10,826 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9dbedb23b89b4496aba995340251c393: 2024-12-11T04:28:10,826 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393., storeName=9dbedb23b89b4496aba995340251c393/A, priority=13, startTime=1733891290792; duration=0sec 2024-12-11T04:28:10,826 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:28:10,826 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9dbedb23b89b4496aba995340251c393:A 2024-12-11T04:28:10,826 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:28:10,827 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:28:10,827 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): 9dbedb23b89b4496aba995340251c393/C is initiating minor compaction (all files) 2024-12-11T04:28:10,827 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9dbedb23b89b4496aba995340251c393/C in TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 
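
The "Committing .tmp/... as ..." lines above reflect the usual write-to-a-temporary-path-then-rename commit step for newly written store files. Below is a minimal, hypothetical sketch of that pattern using the Hadoop FileSystem API; the paths, class name, and file contents are placeholders, not the test's actual files.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of the ".tmp then rename" commit step behind the
// HRegionFileSystem "Committing ... as ..." lines. Paths are placeholders.
public class TmpCommitSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        Path tmpFile = new Path("/tmp/commit-sketch/region/.tmp/A/newfile");
        Path finalFile = new Path("/tmp/commit-sketch/region/A/newfile");

        // 1. Write the new store file under .tmp so readers never see a partial file.
        try (FSDataOutputStream out = fs.create(tmpFile, true)) {
            out.writeBytes("compacted-or-flushed-content");
        }

        // 2. Commit by renaming into the store directory; on HDFS the rename is
        //    atomic, so the file becomes visible fully formed or not at all.
        fs.mkdirs(finalFile.getParent());
        if (!fs.rename(tmpFile, finalFile)) {
            throw new IOException("Failed to commit " + tmpFile + " to " + finalFile);
        }
    }
}
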
2024-12-11T04:28:10,827 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/8038bb22b20f4744a2d5ab7b43ba97d9, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/c6e1fdfc27164653a88e30a11ec2e6d6, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/d9d8a2152d2149eca6aa222ee48ecc5d] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp, totalSize=36.1 K 2024-12-11T04:28:10,827 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8038bb22b20f4744a2d5ab7b43ba97d9, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1733891286604 2024-12-11T04:28:10,827 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting c6e1fdfc27164653a88e30a11ec2e6d6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1733891287251 2024-12-11T04:28:10,828 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting d9d8a2152d2149eca6aa222ee48ecc5d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733891288391 2024-12-11T04:28:10,833 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9dbedb23b89b4496aba995340251c393#C#compaction#377 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:10,833 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/fb98429873b2411594c961ae5bcc06e3 is 50, key is test_row_0/C:col10/1733891288401/Put/seqid=0 2024-12-11T04:28:10,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742272_1448 (size=12731) 2024-12-11T04:28:10,841 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/fb98429873b2411594c961ae5bcc06e3 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/fb98429873b2411594c961ae5bcc06e3 2024-12-11T04:28:10,852 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9dbedb23b89b4496aba995340251c393/C of 9dbedb23b89b4496aba995340251c393 into fb98429873b2411594c961ae5bcc06e3(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
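
The PressureAwareThroughputController lines report each compaction's average throughput against a 50.00 MB/second limit and how long the compactor slept. As a rough illustration of that throttling idea only (not HBase's controller), the sketch below sleeps whenever the observed write rate exceeds a configured byte rate; the class name and the 16 MB chunk size are assumptions.

// Rough sketch of a throughput limit for background writes: sleep when the
// observed write rate exceeds the allowed rate. Not HBase's actual
// PressureAwareThroughputController, just the underlying idea.
public class ThroughputLimitSketch {
    private final double maxBytesPerSecond;
    private final long windowStartNanos = System.nanoTime();
    private long bytesInWindow = 0;

    public ThroughputLimitSketch(double maxBytesPerSecond) {
        this.maxBytesPerSecond = maxBytesPerSecond;
    }

    /** Call after writing `bytes`; sleeps if the write rate is too high. */
    public void control(long bytes) throws InterruptedException {
        bytesInWindow += bytes;
        double elapsedSeconds = (System.nanoTime() - windowStartNanos) / 1e9;
        double minimumSeconds = bytesInWindow / maxBytesPerSecond;
        if (elapsedSeconds < minimumSeconds) {
            Thread.sleep((long) ((minimumSeconds - elapsedSeconds) * 1000));
        }
    }

    public static void main(String[] args) throws InterruptedException {
        ThroughputLimitSketch limiter = new ThroughputLimitSketch(50 * 1024 * 1024); // 50 MB/s
        for (int i = 0; i < 10; i++) {
            limiter.control(16 * 1024 * 1024); // pretend we just wrote a 16 MB chunk
        }
    }
}
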
2024-12-11T04:28:10,852 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9dbedb23b89b4496aba995340251c393: 2024-12-11T04:28:10,852 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393., storeName=9dbedb23b89b4496aba995340251c393/C, priority=13, startTime=1733891290792; duration=0sec 2024-12-11T04:28:10,852 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:10,852 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9dbedb23b89b4496aba995340251c393:C 2024-12-11T04:28:10,933 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:10,933 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-11T04:28:10,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:10,933 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2837): Flushing 9dbedb23b89b4496aba995340251c393 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-11T04:28:10,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=A 2024-12-11T04:28:10,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:10,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=B 2024-12-11T04:28:10,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:10,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=C 2024-12-11T04:28:10,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:10,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412116f6dd09893044921bde4464503301cd4_9dbedb23b89b4496aba995340251c393 is 50, key is test_row_0/A:col10/1733891289544/Put/seqid=0 2024-12-11T04:28:10,945 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742273_1449 (size=12454) 2024-12-11T04:28:10,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-11T04:28:11,216 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/82239fade5d943fdbc9a8795286a63b8 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/82239fade5d943fdbc9a8795286a63b8 2024-12-11T04:28:11,220 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9dbedb23b89b4496aba995340251c393/B of 9dbedb23b89b4496aba995340251c393 into 82239fade5d943fdbc9a8795286a63b8(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:28:11,220 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9dbedb23b89b4496aba995340251c393: 2024-12-11T04:28:11,220 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393., storeName=9dbedb23b89b4496aba995340251c393/B, priority=13, startTime=1733891290792; duration=0sec 2024-12-11T04:28:11,221 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:11,221 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9dbedb23b89b4496aba995340251c393:B 2024-12-11T04:28:11,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:11,349 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412116f6dd09893044921bde4464503301cd4_9dbedb23b89b4496aba995340251c393 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412116f6dd09893044921bde4464503301cd4_9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:11,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/9f166846df1b42fb83ad067dcd57a739, store: [table=TestAcidGuarantees family=A region=9dbedb23b89b4496aba995340251c393] 2024-12-11T04:28:11,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/9f166846df1b42fb83ad067dcd57a739 is 175, key is test_row_0/A:col10/1733891289544/Put/seqid=0 2024-12-11T04:28:11,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742274_1450 (size=31255) 2024-12-11T04:28:11,679 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. as already flushing 2024-12-11T04:28:11,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:11,693 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:11,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891351690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:11,700 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:11,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891351692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:11,700 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:11,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891351692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:11,700 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:11,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891351693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:11,701 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:11,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891351693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:11,755 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=278, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/9f166846df1b42fb83ad067dcd57a739 2024-12-11T04:28:11,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/0b42a0418e274751b3729ad39d958bf1 is 50, key is test_row_0/B:col10/1733891289544/Put/seqid=0 2024-12-11T04:28:11,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742275_1451 (size=12301) 2024-12-11T04:28:11,799 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:11,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891351794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:11,806 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:11,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891351801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:11,806 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:11,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891351801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:11,806 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:11,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891351801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:11,807 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:11,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891351801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:11,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-11T04:28:12,005 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:12,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891352000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:12,009 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:12,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891352007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:12,010 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:12,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891352008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:12,011 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:12,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891352008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:12,011 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:12,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891352009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:12,171 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/0b42a0418e274751b3729ad39d958bf1 2024-12-11T04:28:12,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/63dc56f667aa409aa64c530c85aee6c1 is 50, key is test_row_0/C:col10/1733891289544/Put/seqid=0 2024-12-11T04:28:12,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742276_1452 (size=12301) 2024-12-11T04:28:12,307 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:12,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891352307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:12,313 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:12,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891352311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:12,314 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:12,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891352311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:12,314 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:12,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891352312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:12,317 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:12,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891352313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:12,583 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/63dc56f667aa409aa64c530c85aee6c1 2024-12-11T04:28:12,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/9f166846df1b42fb83ad067dcd57a739 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/9f166846df1b42fb83ad067dcd57a739 2024-12-11T04:28:12,593 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/9f166846df1b42fb83ad067dcd57a739, entries=150, sequenceid=278, filesize=30.5 K 2024-12-11T04:28:12,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/0b42a0418e274751b3729ad39d958bf1 as 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/0b42a0418e274751b3729ad39d958bf1 2024-12-11T04:28:12,600 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/0b42a0418e274751b3729ad39d958bf1, entries=150, sequenceid=278, filesize=12.0 K 2024-12-11T04:28:12,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/63dc56f667aa409aa64c530c85aee6c1 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/63dc56f667aa409aa64c530c85aee6c1 2024-12-11T04:28:12,604 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/63dc56f667aa409aa64c530c85aee6c1, entries=150, sequenceid=278, filesize=12.0 K 2024-12-11T04:28:12,605 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 9dbedb23b89b4496aba995340251c393 in 1672ms, sequenceid=278, compaction requested=false 2024-12-11T04:28:12,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2538): Flush status journal for 9dbedb23b89b4496aba995340251c393: 2024-12-11T04:28:12,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 
2024-12-11T04:28:12,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=127 2024-12-11T04:28:12,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=127 2024-12-11T04:28:12,608 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126 2024-12-11T04:28:12,608 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7430 sec 2024-12-11T04:28:12,609 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees in 2.7480 sec 2024-12-11T04:28:12,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:12,813 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9dbedb23b89b4496aba995340251c393 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-11T04:28:12,813 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=A 2024-12-11T04:28:12,813 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:12,813 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=B 2024-12-11T04:28:12,813 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:12,813 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=C 2024-12-11T04:28:12,813 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:12,820 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211bd589bfc3867499ea3ee19badaa15bad_9dbedb23b89b4496aba995340251c393 is 50, key is test_row_0/A:col10/1733891291691/Put/seqid=0 2024-12-11T04:28:12,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742277_1453 (size=14994) 2024-12-11T04:28:12,850 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:12,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891352842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:12,850 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:12,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891352842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:12,851 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:12,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891352843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:12,851 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:12,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891352844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:12,852 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:12,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891352845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:12,954 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:12,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891352951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:12,955 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:12,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891352952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:12,955 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:12,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891352952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:12,955 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:12,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891352953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:12,956 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:12,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891352953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:13,160 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:13,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891353155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:13,160 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:13,160 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:13,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891353156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:13,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891353156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:13,160 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:13,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891353156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:13,161 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:13,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891353157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:13,227 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:13,230 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211bd589bfc3867499ea3ee19badaa15bad_9dbedb23b89b4496aba995340251c393 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211bd589bfc3867499ea3ee19badaa15bad_9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:13,231 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/bf6fbd9f90454072af74f693d3cb793d, store: [table=TestAcidGuarantees family=A region=9dbedb23b89b4496aba995340251c393] 2024-12-11T04:28:13,232 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/bf6fbd9f90454072af74f693d3cb793d is 175, key is test_row_0/A:col10/1733891291691/Put/seqid=0 2024-12-11T04:28:13,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742278_1454 (size=39949) 2024-12-11T04:28:13,464 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:13,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891353462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:13,465 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:13,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891353462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:13,465 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:13,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891353462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:13,465 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:13,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891353462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:13,465 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:13,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891353463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:13,637 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=295, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/bf6fbd9f90454072af74f693d3cb793d 2024-12-11T04:28:13,647 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/e98e13b4f2804680a194cebc2c77da26 is 50, key is test_row_0/B:col10/1733891291691/Put/seqid=0 2024-12-11T04:28:13,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742279_1455 (size=12301) 2024-12-11T04:28:13,654 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/e98e13b4f2804680a194cebc2c77da26 2024-12-11T04:28:13,676 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/7de60f57e7f04e948b064295390bf635 is 50, key is test_row_0/C:col10/1733891291691/Put/seqid=0 2024-12-11T04:28:13,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742280_1456 (size=12301) 2024-12-11T04:28:13,680 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/7de60f57e7f04e948b064295390bf635 2024-12-11T04:28:13,685 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/bf6fbd9f90454072af74f693d3cb793d as 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/bf6fbd9f90454072af74f693d3cb793d 2024-12-11T04:28:13,692 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/bf6fbd9f90454072af74f693d3cb793d, entries=200, sequenceid=295, filesize=39.0 K 2024-12-11T04:28:13,692 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/e98e13b4f2804680a194cebc2c77da26 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/e98e13b4f2804680a194cebc2c77da26 2024-12-11T04:28:13,696 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/e98e13b4f2804680a194cebc2c77da26, entries=150, sequenceid=295, filesize=12.0 K 2024-12-11T04:28:13,696 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/7de60f57e7f04e948b064295390bf635 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/7de60f57e7f04e948b064295390bf635 2024-12-11T04:28:13,699 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/7de60f57e7f04e948b064295390bf635, entries=150, sequenceid=295, filesize=12.0 K 2024-12-11T04:28:13,700 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for 9dbedb23b89b4496aba995340251c393 in 888ms, sequenceid=295, compaction requested=true 2024-12-11T04:28:13,700 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9dbedb23b89b4496aba995340251c393: 2024-12-11T04:28:13,701 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:28:13,701 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9dbedb23b89b4496aba995340251c393:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:28:13,701 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:13,701 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:28:13,701 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9dbedb23b89b4496aba995340251c393:B, priority=-2147483648, current under compaction 
store size is 2 2024-12-11T04:28:13,701 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:13,701 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9dbedb23b89b4496aba995340251c393:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:28:13,701 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:28:13,702 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102889 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:28:13,702 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): 9dbedb23b89b4496aba995340251c393/A is initiating minor compaction (all files) 2024-12-11T04:28:13,702 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9dbedb23b89b4496aba995340251c393/A in TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:13,702 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/bbe6aa2604a74859a0a2a6756fa8281a, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/9f166846df1b42fb83ad067dcd57a739, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/bf6fbd9f90454072af74f693d3cb793d] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp, totalSize=100.5 K 2024-12-11T04:28:13,702 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:13,702 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:28:13,702 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 
files: [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/bbe6aa2604a74859a0a2a6756fa8281a, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/9f166846df1b42fb83ad067dcd57a739, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/bf6fbd9f90454072af74f693d3cb793d] 2024-12-11T04:28:13,702 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): 9dbedb23b89b4496aba995340251c393/B is initiating minor compaction (all files) 2024-12-11T04:28:13,702 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9dbedb23b89b4496aba995340251c393/B in TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:13,702 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/82239fade5d943fdbc9a8795286a63b8, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/0b42a0418e274751b3729ad39d958bf1, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/e98e13b4f2804680a194cebc2c77da26] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp, totalSize=36.5 K 2024-12-11T04:28:13,703 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting bbe6aa2604a74859a0a2a6756fa8281a, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733891288391 2024-12-11T04:28:13,703 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 82239fade5d943fdbc9a8795286a63b8, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733891288391 2024-12-11T04:28:13,703 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9f166846df1b42fb83ad067dcd57a739, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1733891289544 2024-12-11T04:28:13,703 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 0b42a0418e274751b3729ad39d958bf1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1733891289544 2024-12-11T04:28:13,704 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting bf6fbd9f90454072af74f693d3cb793d, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1733891291690 2024-12-11T04:28:13,704 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting e98e13b4f2804680a194cebc2c77da26, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1733891291691 2024-12-11T04:28:13,713 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
9dbedb23b89b4496aba995340251c393#B#compaction#384 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:13,714 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/5d5f74e8ed444156a816be9923067101 is 50, key is test_row_0/B:col10/1733891291691/Put/seqid=0 2024-12-11T04:28:13,715 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=9dbedb23b89b4496aba995340251c393] 2024-12-11T04:28:13,724 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121144f08be7fbd7499abf642462b4d74296_9dbedb23b89b4496aba995340251c393 store=[table=TestAcidGuarantees family=A region=9dbedb23b89b4496aba995340251c393] 2024-12-11T04:28:13,727 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121144f08be7fbd7499abf642462b4d74296_9dbedb23b89b4496aba995340251c393, store=[table=TestAcidGuarantees family=A region=9dbedb23b89b4496aba995340251c393] 2024-12-11T04:28:13,727 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121144f08be7fbd7499abf642462b4d74296_9dbedb23b89b4496aba995340251c393 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=9dbedb23b89b4496aba995340251c393] 2024-12-11T04:28:13,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742281_1457 (size=12983) 2024-12-11T04:28:13,735 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/5d5f74e8ed444156a816be9923067101 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/5d5f74e8ed444156a816be9923067101 2024-12-11T04:28:13,741 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9dbedb23b89b4496aba995340251c393/B of 9dbedb23b89b4496aba995340251c393 into 5d5f74e8ed444156a816be9923067101(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T04:28:13,741 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9dbedb23b89b4496aba995340251c393: 2024-12-11T04:28:13,741 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393., storeName=9dbedb23b89b4496aba995340251c393/B, priority=13, startTime=1733891293701; duration=0sec 2024-12-11T04:28:13,741 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:28:13,741 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9dbedb23b89b4496aba995340251c393:B 2024-12-11T04:28:13,741 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:28:13,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742282_1458 (size=4469) 2024-12-11T04:28:13,744 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:28:13,744 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): 9dbedb23b89b4496aba995340251c393/C is initiating minor compaction (all files) 2024-12-11T04:28:13,744 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9dbedb23b89b4496aba995340251c393/C in TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:13,745 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/fb98429873b2411594c961ae5bcc06e3, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/63dc56f667aa409aa64c530c85aee6c1, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/7de60f57e7f04e948b064295390bf635] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp, totalSize=36.5 K 2024-12-11T04:28:13,746 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9dbedb23b89b4496aba995340251c393#A#compaction#385 average throughput is 0.81 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:13,746 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/68e7a8846bbc4d839bfdeaa458d37e70 is 175, key is test_row_0/A:col10/1733891291691/Put/seqid=0 2024-12-11T04:28:13,748 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting fb98429873b2411594c961ae5bcc06e3, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733891288391 2024-12-11T04:28:13,750 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 63dc56f667aa409aa64c530c85aee6c1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1733891289544 2024-12-11T04:28:13,751 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 7de60f57e7f04e948b064295390bf635, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1733891291691 2024-12-11T04:28:13,759 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9dbedb23b89b4496aba995340251c393#C#compaction#386 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:13,759 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/cfbd7473bffb469581db4f598cbbf149 is 50, key is test_row_0/C:col10/1733891291691/Put/seqid=0 2024-12-11T04:28:13,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742283_1459 (size=31937) 2024-12-11T04:28:13,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742284_1460 (size=12983) 2024-12-11T04:28:13,774 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/cfbd7473bffb469581db4f598cbbf149 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/cfbd7473bffb469581db4f598cbbf149 2024-12-11T04:28:13,783 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9dbedb23b89b4496aba995340251c393/C of 9dbedb23b89b4496aba995340251c393 into cfbd7473bffb469581db4f598cbbf149(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T04:28:13,783 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9dbedb23b89b4496aba995340251c393: 2024-12-11T04:28:13,783 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393., storeName=9dbedb23b89b4496aba995340251c393/C, priority=13, startTime=1733891293701; duration=0sec 2024-12-11T04:28:13,783 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:13,783 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9dbedb23b89b4496aba995340251c393:C 2024-12-11T04:28:13,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-11T04:28:13,966 INFO [Thread-1693 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 126 completed 2024-12-11T04:28:13,968 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:28:13,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:13,968 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9dbedb23b89b4496aba995340251c393 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-11T04:28:13,969 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=A 2024-12-11T04:28:13,969 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:13,969 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=B 2024-12-11T04:28:13,969 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:13,969 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=C 2024-12-11T04:28:13,969 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:13,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees 2024-12-11T04:28:13,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-11T04:28:13,971 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:28:13,971 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, 
table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:28:13,972 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:28:13,981 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211b7fd102a5caf424ba39d260b26dc49fc_9dbedb23b89b4496aba995340251c393 is 50, key is test_row_0/A:col10/1733891293967/Put/seqid=0 2024-12-11T04:28:13,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742285_1461 (size=12454) 2024-12-11T04:28:14,024 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:14,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891354017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:14,024 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:14,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891354019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:14,024 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:14,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891354020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:14,025 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:14,025 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:14,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891354020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:14,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891354018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:14,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-11T04:28:14,123 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:14,124 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-11T04:28:14,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:14,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. as already flushing 2024-12-11T04:28:14,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:14,124 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:14,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:14,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:14,127 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:14,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891354125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:14,127 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:14,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891354126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:14,128 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:14,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891354126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:14,128 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:14,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891354126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:14,128 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:14,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891354127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:14,167 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/68e7a8846bbc4d839bfdeaa458d37e70 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/68e7a8846bbc4d839bfdeaa458d37e70 2024-12-11T04:28:14,172 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9dbedb23b89b4496aba995340251c393/A of 9dbedb23b89b4496aba995340251c393 into 68e7a8846bbc4d839bfdeaa458d37e70(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:28:14,172 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9dbedb23b89b4496aba995340251c393: 2024-12-11T04:28:14,172 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393., storeName=9dbedb23b89b4496aba995340251c393/A, priority=13, startTime=1733891293700; duration=0sec 2024-12-11T04:28:14,172 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:14,173 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9dbedb23b89b4496aba995340251c393:A 2024-12-11T04:28:14,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-11T04:28:14,276 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:14,276 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-11T04:28:14,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 
2024-12-11T04:28:14,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. as already flushing 2024-12-11T04:28:14,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:14,277 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:14,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:28:14,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:14,331 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:14,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891354328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:14,332 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:14,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891354329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:14,332 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:14,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891354329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:14,332 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:14,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891354330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:14,333 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:14,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891354330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:14,386 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:14,389 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211b7fd102a5caf424ba39d260b26dc49fc_9dbedb23b89b4496aba995340251c393 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211b7fd102a5caf424ba39d260b26dc49fc_9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:14,390 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/f73b3952926b4614ae59c1e514ad2f35, store: [table=TestAcidGuarantees family=A region=9dbedb23b89b4496aba995340251c393] 2024-12-11T04:28:14,390 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/f73b3952926b4614ae59c1e514ad2f35 is 175, key is test_row_0/A:col10/1733891293967/Put/seqid=0 2024-12-11T04:28:14,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742286_1462 (size=31255) 2024-12-11T04:28:14,429 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 
5f466b3719ec,39071,1733891180267 2024-12-11T04:28:14,429 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-11T04:28:14,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:14,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. as already flushing 2024-12-11T04:28:14,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:14,430 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:14,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:28:14,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:14,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-11T04:28:14,582 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:14,582 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-11T04:28:14,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 
2024-12-11T04:28:14,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. as already flushing 2024-12-11T04:28:14,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:14,582 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:14,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
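[Annotation, not part of the captured log] The repeated "Over memstore limit=512.0 K" rejections above come from the region blocking writes once its memstore exceeds the blocking threshold, which HBase normally derives as flush size times block multiplier. The sketch below only illustrates that arithmetic; the 128 KB flush size is an assumed test-scale value chosen so the product matches the 512.0 K reported here, not a value read from this test's configuration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch: how the "Over memstore limit=512.0 K" threshold is commonly derived.
// blocking limit = hbase.hregion.memstore.flush.size * hbase.hregion.memstore.block.multiplier
public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L); // assumed test-scale value
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // HBase default multiplier

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = flushSize * multiplier;

    // 131072 * 4 = 524288 bytes = 512.0 K, matching the RegionTooBusyException messages above.
    System.out.println("blocking memstore limit = " + blockingLimit + " bytes");
  }
}
```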
2024-12-11T04:28:14,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:14,635 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:14,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891354632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:14,635 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:14,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891354633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:14,639 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:14,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891354635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:14,639 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:14,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891354635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:14,639 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:14,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891354635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:14,734 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:14,735 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-11T04:28:14,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:14,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. as already flushing 2024-12-11T04:28:14,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:14,735 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:14,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:14,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:14,796 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=319, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/f73b3952926b4614ae59c1e514ad2f35 2024-12-11T04:28:14,803 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/6d2c5526949a4828999dc7edea8651a1 is 50, key is test_row_0/B:col10/1733891293967/Put/seqid=0 2024-12-11T04:28:14,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742287_1463 (size=12301) 2024-12-11T04:28:14,807 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=319 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/6d2c5526949a4828999dc7edea8651a1 2024-12-11T04:28:14,813 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/de0f70e906f244388a7c9d12edfcb003 is 50, key is test_row_0/C:col10/1733891293967/Put/seqid=0 2024-12-11T04:28:14,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742288_1464 (size=12301) 2024-12-11T04:28:14,887 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 
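[Annotation, not part of the captured log] The pid=129 entries show the master's region-flush procedure being bounced with "NOT flushing ... as already flushing" and the resulting IOException reported back to HMaster; the dispatcher simply re-sends the callable until the in-flight flush finishes. A flush of this kind is ordinarily requested through the Admin API; the snippet below is an illustrative client-side equivalent under that assumption, not code taken from the test.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Illustrative only: asks the master to flush the test table. The master runs a
// flush procedure (as with pid=128/129 above) and dispatches FlushRegionCallable
// to the region server hosting the region; if a flush is already running, the
// region-level callable is retried until it succeeds, as seen in the log.
public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```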
2024-12-11T04:28:14,887 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-11T04:28:14,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:14,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. as already flushing 2024-12-11T04:28:14,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:14,888 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:14,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:28:14,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:15,040 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:15,040 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-11T04:28:15,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:15,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 
as already flushing 2024-12-11T04:28:15,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:15,041 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:15,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:15,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:15,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-11T04:28:15,140 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:15,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891355136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:15,141 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:15,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891355138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:15,146 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:15,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891355142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:15,146 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:15,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891355142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:15,147 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:15,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891355144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:15,193 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:15,193 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-11T04:28:15,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:15,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. as already flushing 2024-12-11T04:28:15,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:15,193 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:15,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:15,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:15,218 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=319 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/de0f70e906f244388a7c9d12edfcb003 2024-12-11T04:28:15,222 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/f73b3952926b4614ae59c1e514ad2f35 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/f73b3952926b4614ae59c1e514ad2f35 2024-12-11T04:28:15,225 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/f73b3952926b4614ae59c1e514ad2f35, entries=150, sequenceid=319, filesize=30.5 K 2024-12-11T04:28:15,226 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/6d2c5526949a4828999dc7edea8651a1 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/6d2c5526949a4828999dc7edea8651a1 2024-12-11T04:28:15,230 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/6d2c5526949a4828999dc7edea8651a1, entries=150, 
sequenceid=319, filesize=12.0 K 2024-12-11T04:28:15,230 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/de0f70e906f244388a7c9d12edfcb003 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/de0f70e906f244388a7c9d12edfcb003 2024-12-11T04:28:15,233 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/de0f70e906f244388a7c9d12edfcb003, entries=150, sequenceid=319, filesize=12.0 K 2024-12-11T04:28:15,234 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 9dbedb23b89b4496aba995340251c393 in 1266ms, sequenceid=319, compaction requested=false 2024-12-11T04:28:15,234 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9dbedb23b89b4496aba995340251c393: 2024-12-11T04:28:15,347 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:15,348 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-11T04:28:15,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 
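[Editorial note] The repeated RegionTooBusyException entries above ("Over memstore limit=512.0 K") come from HRegion.checkResources rejecting writes while the region's memstore sits above its blocking threshold, i.e. hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the unusually small 512 K figure suggests the test harness deliberately shrinks the flush size so that flushes and blocked writers interleave. For orientation only, a minimal Java sketch of a writer backing off when this condition surfaces. The table, family, and row names mirror the log; the retry loop and sleep values are assumptions, and the stock HBase client already retries this condition internally (it may arrive wrapped in a retries-exhausted exception rather than directly as RegionTooBusyException).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPutSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;                      // assumed starting back-off
          for (int attempt = 0; attempt < 10; attempt++) {
            try {
              table.put(put);                        // write accepted
              break;
            } catch (RegionTooBusyException e) {
              // Region is blocking updates until the in-flight flush frees memstore space.
              Thread.sleep(backoffMs);
              backoffMs = Math.min(backoffMs * 2, 5_000);
            }
          }
        }
      }
    }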
2024-12-11T04:28:15,348 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2837): Flushing 9dbedb23b89b4496aba995340251c393 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-11T04:28:15,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=A 2024-12-11T04:28:15,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:15,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=B 2024-12-11T04:28:15,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:15,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=C 2024-12-11T04:28:15,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:15,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412115e9c69027b594a94900ffd468e5567fb_9dbedb23b89b4496aba995340251c393 is 50, key is test_row_0/A:col10/1733891294018/Put/seqid=0 2024-12-11T04:28:15,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742289_1465 (size=12454) 2024-12-11T04:28:15,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:15,767 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412115e9c69027b594a94900ffd468e5567fb_9dbedb23b89b4496aba995340251c393 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412115e9c69027b594a94900ffd468e5567fb_9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:15,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/60c7db2a758a41feb52065ab7a399cf5, store: [table=TestAcidGuarantees family=A region=9dbedb23b89b4496aba995340251c393] 2024-12-11T04:28:15,768 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/60c7db2a758a41feb52065ab7a399cf5 is 175, key is test_row_0/A:col10/1733891294018/Put/seqid=0 2024-12-11T04:28:15,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742290_1466 (size=31255) 2024-12-11T04:28:15,777 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=334, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/60c7db2a758a41feb52065ab7a399cf5 2024-12-11T04:28:15,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/9fe7a352ba5c4b7bbbe54c093af6afe2 is 50, key is test_row_0/B:col10/1733891294018/Put/seqid=0 2024-12-11T04:28:15,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742291_1467 (size=12301) 2024-12-11T04:28:16,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-11T04:28:16,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:16,145 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. as already flushing 2024-12-11T04:28:16,188 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=334 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/9fe7a352ba5c4b7bbbe54c093af6afe2 2024-12-11T04:28:16,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/ba51e08c3e0b4b24a4cea2086bd845a6 is 50, key is test_row_0/C:col10/1733891294018/Put/seqid=0 2024-12-11T04:28:16,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742292_1468 (size=12301) 2024-12-11T04:28:16,205 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:16,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891356200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:16,206 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:16,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891356200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:16,206 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:16,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891356200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:16,206 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:16,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891356201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:16,206 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:16,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891356201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:16,310 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:16,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891356306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:16,310 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:16,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891356307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:16,310 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:16,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891356307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:16,310 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:16,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891356307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:16,311 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:16,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891356308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:16,325 DEBUG [Thread-1694 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x61ec0f48 to 127.0.0.1:50078 2024-12-11T04:28:16,325 DEBUG [Thread-1694 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:28:16,328 DEBUG [Thread-1702 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3cb726fe to 127.0.0.1:50078 2024-12-11T04:28:16,328 DEBUG [Thread-1702 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:28:16,329 DEBUG [Thread-1696 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7819b9e2 to 127.0.0.1:50078 2024-12-11T04:28:16,329 DEBUG [Thread-1696 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:28:16,330 DEBUG [Thread-1698 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x47679076 to 127.0.0.1:50078 2024-12-11T04:28:16,330 DEBUG [Thread-1698 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:28:16,333 DEBUG [Thread-1700 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4cb9e50e to 127.0.0.1:50078 2024-12-11T04:28:16,333 DEBUG [Thread-1700 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:28:16,512 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:16,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44982 deadline: 1733891356511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:16,512 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:16,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44998 deadline: 1733891356512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:16,512 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:16,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44980 deadline: 1733891356512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:16,518 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:16,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45020 deadline: 1733891356518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:16,519 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:16,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:44988 deadline: 1733891356519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:16,598 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=334 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/ba51e08c3e0b4b24a4cea2086bd845a6 2024-12-11T04:28:16,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/60c7db2a758a41feb52065ab7a399cf5 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/60c7db2a758a41feb52065ab7a399cf5 2024-12-11T04:28:16,605 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/60c7db2a758a41feb52065ab7a399cf5, entries=150, sequenceid=334, filesize=30.5 K 2024-12-11T04:28:16,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/9fe7a352ba5c4b7bbbe54c093af6afe2 as 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/9fe7a352ba5c4b7bbbe54c093af6afe2 2024-12-11T04:28:16,608 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/9fe7a352ba5c4b7bbbe54c093af6afe2, entries=150, sequenceid=334, filesize=12.0 K 2024-12-11T04:28:16,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/ba51e08c3e0b4b24a4cea2086bd845a6 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/ba51e08c3e0b4b24a4cea2086bd845a6 2024-12-11T04:28:16,611 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/ba51e08c3e0b4b24a4cea2086bd845a6, entries=150, sequenceid=334, filesize=12.0 K 2024-12-11T04:28:16,612 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 9dbedb23b89b4496aba995340251c393 in 1264ms, sequenceid=334, compaction requested=true 2024-12-11T04:28:16,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2538): Flush status journal for 9dbedb23b89b4496aba995340251c393: 2024-12-11T04:28:16,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 
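[Editorial note] In the flush that just completed, family A is written through DefaultMobStoreFlusher and HMobStore: the cells land under mobdir/.tmp and are then renamed into the mob data directory, so family A is MOB-enabled in this variant of the run, while B and C flush through the regular DefaultStoreFlusher. As a hedged illustration of what such a schema looks like with the standard 2.x descriptor builders, and not a claim about how AcidGuaranteesTestTool actually builds its table, a short sketch; the MOB threshold value is invented for the example.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobFamilySketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Cells in a MOB-enabled family that exceed the threshold are written to the
          // mob area (the mobdir/... paths in the log) instead of the regular store files.
          TableDescriptor desc = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestAcidGuarantees"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                  .setMobEnabled(true)
                  .setMobThreshold(4)               // illustrative threshold, in bytes
                  .build())
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"))
              .build();
          admin.createTable(desc);
        }
      }
    }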
2024-12-11T04:28:16,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=129 2024-12-11T04:28:16,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=129 2024-12-11T04:28:16,614 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=129, resume processing ppid=128 2024-12-11T04:28:16,614 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, ppid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6410 sec 2024-12-11T04:28:16,615 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees in 2.6460 sec 2024-12-11T04:28:16,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:16,814 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9dbedb23b89b4496aba995340251c393 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-11T04:28:16,814 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=A 2024-12-11T04:28:16,814 DEBUG [Thread-1683 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x49e13594 to 127.0.0.1:50078 2024-12-11T04:28:16,814 DEBUG [Thread-1683 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:28:16,814 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:16,814 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=B 2024-12-11T04:28:16,814 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:16,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=C 2024-12-11T04:28:16,815 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:16,820 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121123356c4b656e414d8cde8a3472353781_9dbedb23b89b4496aba995340251c393 is 50, key is test_row_0/A:col10/1733891296166/Put/seqid=0 2024-12-11T04:28:16,821 DEBUG [Thread-1685 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2c54a0d3 to 127.0.0.1:50078 2024-12-11T04:28:16,821 DEBUG [Thread-1691 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x27539bdc to 127.0.0.1:50078 2024-12-11T04:28:16,821 DEBUG [Thread-1689 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0801ba40 to 127.0.0.1:50078 2024-12-11T04:28:16,821 DEBUG [Thread-1685 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:28:16,821 DEBUG [Thread-1689 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:28:16,821 DEBUG [Thread-1691 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:28:16,822 DEBUG [Thread-1687 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper 
connection 0x3875c8c5 to 127.0.0.1:50078 2024-12-11T04:28:16,822 DEBUG [Thread-1687 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:28:16,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742293_1469 (size=12454) 2024-12-11T04:28:17,225 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:17,228 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121123356c4b656e414d8cde8a3472353781_9dbedb23b89b4496aba995340251c393 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121123356c4b656e414d8cde8a3472353781_9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:17,229 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/4dcc1427d918493b875ab9bab3b4acf5, store: [table=TestAcidGuarantees family=A region=9dbedb23b89b4496aba995340251c393] 2024-12-11T04:28:17,229 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/4dcc1427d918493b875ab9bab3b4acf5 is 175, key is test_row_0/A:col10/1733891296166/Put/seqid=0 2024-12-11T04:28:17,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742294_1470 (size=31255) 2024-12-11T04:28:17,633 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=358, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/4dcc1427d918493b875ab9bab3b4acf5 2024-12-11T04:28:17,638 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/9b59497e70b7442cbebf82a30ed7df66 is 50, key is test_row_0/B:col10/1733891296166/Put/seqid=0 2024-12-11T04:28:17,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742295_1471 (size=12301) 2024-12-11T04:28:18,042 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=358 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/9b59497e70b7442cbebf82a30ed7df66 2024-12-11T04:28:18,048 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/d3573c4579ee4c8f868c5e1a348ead1b is 50, key is test_row_0/C:col10/1733891296166/Put/seqid=0 2024-12-11T04:28:18,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742296_1472 (size=12301) 2024-12-11T04:28:18,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-11T04:28:18,075 INFO [Thread-1693 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 128 completed 2024-12-11T04:28:18,075 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-11T04:28:18,075 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 60 2024-12-11T04:28:18,075 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 57 2024-12-11T04:28:18,075 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 61 2024-12-11T04:28:18,075 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 56 2024-12-11T04:28:18,075 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 52 2024-12-11T04:28:18,075 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-11T04:28:18,075 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-11T04:28:18,075 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2731 2024-12-11T04:28:18,075 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8192 rows 2024-12-11T04:28:18,075 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2722 2024-12-11T04:28:18,075 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8165 rows 2024-12-11T04:28:18,075 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2718 2024-12-11T04:28:18,075 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8153 rows 2024-12-11T04:28:18,075 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2731 2024-12-11T04:28:18,075 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8191 rows 2024-12-11T04:28:18,075 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2741 2024-12-11T04:28:18,075 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8222 rows 2024-12-11T04:28:18,075 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-11T04:28:18,075 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x62f74604 to 127.0.0.1:50078 2024-12-11T04:28:18,075 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:28:18,077 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-11T04:28:18,077 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-11T04:28:18,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=130, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-11T04:28:18,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-11T04:28:18,081 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733891298081"}]},"ts":"1733891298081"} 2024-12-11T04:28:18,082 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-11T04:28:18,085 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-11T04:28:18,085 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-11T04:28:18,086 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9dbedb23b89b4496aba995340251c393, UNASSIGN}] 2024-12-11T04:28:18,087 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9dbedb23b89b4496aba995340251c393, UNASSIGN 2024-12-11T04:28:18,087 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=132 updating hbase:meta row=9dbedb23b89b4496aba995340251c393, regionState=CLOSING, regionLocation=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:18,088 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T04:28:18,088 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; CloseRegionProcedure 9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267}] 2024-12-11T04:28:18,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-11T04:28:18,239 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:18,240 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] handler.UnassignRegionHandler(124): Close 9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:18,240 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-11T04:28:18,240 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1681): Closing 9dbedb23b89b4496aba995340251c393, disabling compactions & flushes 2024-12-11T04:28:18,240 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1942): waiting for 0 compactions & cache flush to complete for region TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 
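[Editorial note] The tail of the excerpt is teardown: the AcidGuaranteesTestTool summary is printed, the client asks the master to disable TestAcidGuarantees, and the master fans that out into DisableTableProcedure (pid=130), CloseTableRegionsProcedure (131), a TransitRegionStateProcedure UNASSIGN (132), and a CloseRegionProcedure (133) while the client polls for completion. A minimal sketch of the equivalent client-side Admin calls, assuming a standard 2.x client; the final deleteTable step does not appear in this excerpt and is included only as the usual follow-on.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushThenDisableSketch {
      public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.flush(tn);         // master-side FlushTableProcedure (pid=128 in the log);
                                   // the client polls until the procedure reports done
          admin.disableTable(tn);  // DisableTableProcedure -> CloseTableRegionsProcedure ->
                                   // TransitRegionStateProcedure(UNASSIGN) -> CloseRegionProcedure
          if (admin.isTableDisabled(tn)) {
            admin.deleteTable(tn); // not shown in this excerpt; typical cleanup once regions close
          }
        }
      }
    }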
2024-12-11T04:28:18,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-11T04:28:18,451 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=358 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/d3573c4579ee4c8f868c5e1a348ead1b 2024-12-11T04:28:18,455 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/4dcc1427d918493b875ab9bab3b4acf5 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/4dcc1427d918493b875ab9bab3b4acf5 2024-12-11T04:28:18,457 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/4dcc1427d918493b875ab9bab3b4acf5, entries=150, sequenceid=358, filesize=30.5 K 2024-12-11T04:28:18,458 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/9b59497e70b7442cbebf82a30ed7df66 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/9b59497e70b7442cbebf82a30ed7df66 2024-12-11T04:28:18,461 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/9b59497e70b7442cbebf82a30ed7df66, entries=150, sequenceid=358, filesize=12.0 K 2024-12-11T04:28:18,461 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/d3573c4579ee4c8f868c5e1a348ead1b as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/d3573c4579ee4c8f868c5e1a348ead1b 2024-12-11T04:28:18,464 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/d3573c4579ee4c8f868c5e1a348ead1b, entries=150, sequenceid=358, filesize=12.0 K 2024-12-11T04:28:18,464 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=26.84 KB/27480 for 9dbedb23b89b4496aba995340251c393 in 1650ms, sequenceid=358, compaction requested=true 2024-12-11T04:28:18,464 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9dbedb23b89b4496aba995340251c393: 2024-12-11T04:28:18,464 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1703): Closing region 
TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:18,464 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9dbedb23b89b4496aba995340251c393:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:28:18,464 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:18,464 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:18,464 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. after waiting 0 ms 2024-12-11T04:28:18,464 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. because compaction request was cancelled 2024-12-11T04:28:18,465 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:18,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9dbedb23b89b4496aba995340251c393:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:28:18,465 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9dbedb23b89b4496aba995340251c393:A 2024-12-11T04:28:18,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:18,465 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. because compaction request was cancelled 2024-12-11T04:28:18,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9dbedb23b89b4496aba995340251c393:C, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:28:18,465 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9dbedb23b89b4496aba995340251c393:B 2024-12-11T04:28:18,465 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 
because compaction request was cancelled 2024-12-11T04:28:18,465 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(2837): Flushing 9dbedb23b89b4496aba995340251c393 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-11T04:28:18,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:18,465 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9dbedb23b89b4496aba995340251c393:C 2024-12-11T04:28:18,465 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=A 2024-12-11T04:28:18,465 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:18,465 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=B 2024-12-11T04:28:18,465 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:18,465 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9dbedb23b89b4496aba995340251c393, store=C 2024-12-11T04:28:18,465 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:18,469 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211b0c85f5a0ec1476e97af54a49e07c4f4_9dbedb23b89b4496aba995340251c393 is 50, key is test_row_0/A:col10/1733891296821/Put/seqid=0 2024-12-11T04:28:18,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742297_1473 (size=12454) 2024-12-11T04:28:18,618 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-11T04:28:18,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-11T04:28:18,873 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:18,876 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211b0c85f5a0ec1476e97af54a49e07c4f4_9dbedb23b89b4496aba995340251c393 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211b0c85f5a0ec1476e97af54a49e07c4f4_9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:18,877 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/03148b3d9e57492986ee872f755bfb62, store: [table=TestAcidGuarantees family=A region=9dbedb23b89b4496aba995340251c393] 2024-12-11T04:28:18,877 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/03148b3d9e57492986ee872f755bfb62 is 175, key is test_row_0/A:col10/1733891296821/Put/seqid=0 2024-12-11T04:28:18,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742298_1474 (size=31255) 2024-12-11T04:28:19,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-11T04:28:19,281 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=365, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/03148b3d9e57492986ee872f755bfb62 2024-12-11T04:28:19,286 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/de436bee2e1044c3b4ee8db1207ea191 is 50, key is test_row_0/B:col10/1733891296821/Put/seqid=0 2024-12-11T04:28:19,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742299_1475 (size=12301) 2024-12-11T04:28:19,690 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=365 (bloomFilter=true), 
to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/de436bee2e1044c3b4ee8db1207ea191 2024-12-11T04:28:19,696 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/0de29be8b15141d4814a08e830d0f423 is 50, key is test_row_0/C:col10/1733891296821/Put/seqid=0 2024-12-11T04:28:19,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742300_1476 (size=12301) 2024-12-11T04:28:20,099 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=365 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/0de29be8b15141d4814a08e830d0f423 2024-12-11T04:28:20,102 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/A/03148b3d9e57492986ee872f755bfb62 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/03148b3d9e57492986ee872f755bfb62 2024-12-11T04:28:20,105 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/03148b3d9e57492986ee872f755bfb62, entries=150, sequenceid=365, filesize=30.5 K 2024-12-11T04:28:20,105 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/B/de436bee2e1044c3b4ee8db1207ea191 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/de436bee2e1044c3b4ee8db1207ea191 2024-12-11T04:28:20,108 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/de436bee2e1044c3b4ee8db1207ea191, entries=150, sequenceid=365, filesize=12.0 K 2024-12-11T04:28:20,108 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/.tmp/C/0de29be8b15141d4814a08e830d0f423 as 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/0de29be8b15141d4814a08e830d0f423 2024-12-11T04:28:20,111 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/0de29be8b15141d4814a08e830d0f423, entries=150, sequenceid=365, filesize=12.0 K 2024-12-11T04:28:20,112 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 9dbedb23b89b4496aba995340251c393 in 1647ms, sequenceid=365, compaction requested=true 2024-12-11T04:28:20,112 DEBUG [StoreCloser-TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/215977e3fc384d1cbdc0f368ec9fd434, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/22c3698675d8466a8911378b9ce89d26, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/0bf240d3a1c543dd86f5c494835d6fef, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/c88df1b2303e41e49fe8d4323c240e75, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/58410824898f4228a1fbcbe61ea5b1ee, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/dd5878b848f64419a21ced534c86a2e5, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/3981e2e0de4e41628932b2bf4df518ba, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/e7cf6e6344c348359a2ce5282ae41a53, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/033be885eec64adcb8add079624bdaa2, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/015fea55851141329e6389dca134f4ef, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/cfdbfaeec7174a0ba445c9b42bf3d70d, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/80093efab46841d4a6ae32c451059f5a, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/35e27520903743329249ada43d007b5e, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/35323d74dc7544ee8b50158e27dce776, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/5981468b5d7a4b0aa4a008068adfe2ba, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/35ef2666b7334579ad764dacb6e345f3, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/bbe6aa2604a74859a0a2a6756fa8281a, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/641d9d923162419aaf6f53a61433b84f, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/9f166846df1b42fb83ad067dcd57a739, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/bf6fbd9f90454072af74f693d3cb793d] to archive 2024-12-11T04:28:20,113 DEBUG [StoreCloser-TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-11T04:28:20,115 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/0bf240d3a1c543dd86f5c494835d6fef to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/0bf240d3a1c543dd86f5c494835d6fef 2024-12-11T04:28:20,115 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/215977e3fc384d1cbdc0f368ec9fd434 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/215977e3fc384d1cbdc0f368ec9fd434 2024-12-11T04:28:20,115 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/58410824898f4228a1fbcbe61ea5b1ee to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/58410824898f4228a1fbcbe61ea5b1ee 2024-12-11T04:28:20,115 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/22c3698675d8466a8911378b9ce89d26 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/22c3698675d8466a8911378b9ce89d26 2024-12-11T04:28:20,115 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/c88df1b2303e41e49fe8d4323c240e75 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/c88df1b2303e41e49fe8d4323c240e75 2024-12-11T04:28:20,115 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/dd5878b848f64419a21ced534c86a2e5 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/dd5878b848f64419a21ced534c86a2e5 2024-12-11T04:28:20,115 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/3981e2e0de4e41628932b2bf4df518ba to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/3981e2e0de4e41628932b2bf4df518ba 2024-12-11T04:28:20,116 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/e7cf6e6344c348359a2ce5282ae41a53 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/e7cf6e6344c348359a2ce5282ae41a53 2024-12-11T04:28:20,118 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/033be885eec64adcb8add079624bdaa2 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/033be885eec64adcb8add079624bdaa2 2024-12-11T04:28:20,118 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/015fea55851141329e6389dca134f4ef to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/015fea55851141329e6389dca134f4ef 2024-12-11T04:28:20,118 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/cfdbfaeec7174a0ba445c9b42bf3d70d to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/cfdbfaeec7174a0ba445c9b42bf3d70d 2024-12-11T04:28:20,118 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/5981468b5d7a4b0aa4a008068adfe2ba to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/5981468b5d7a4b0aa4a008068adfe2ba 2024-12-11T04:28:20,118 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/80093efab46841d4a6ae32c451059f5a to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/80093efab46841d4a6ae32c451059f5a 2024-12-11T04:28:20,118 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/35ef2666b7334579ad764dacb6e345f3 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/35ef2666b7334579ad764dacb6e345f3 2024-12-11T04:28:20,118 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/35e27520903743329249ada43d007b5e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/35e27520903743329249ada43d007b5e 2024-12-11T04:28:20,118 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/35323d74dc7544ee8b50158e27dce776 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/35323d74dc7544ee8b50158e27dce776 2024-12-11T04:28:20,119 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/641d9d923162419aaf6f53a61433b84f to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/641d9d923162419aaf6f53a61433b84f 2024-12-11T04:28:20,119 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/bf6fbd9f90454072af74f693d3cb793d to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/bf6fbd9f90454072af74f693d3cb793d 2024-12-11T04:28:20,119 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/9f166846df1b42fb83ad067dcd57a739 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/9f166846df1b42fb83ad067dcd57a739 2024-12-11T04:28:20,119 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/bbe6aa2604a74859a0a2a6756fa8281a to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/bbe6aa2604a74859a0a2a6756fa8281a 2024-12-11T04:28:20,120 DEBUG [StoreCloser-TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/06380b79b0874ac8b2c49d1e31bcc466, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/77d6f8db740a46639986b6f181caa194, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/35dec10af7ef45888bca493a4aa46a4f, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/0c85f7bf5b9d4f5fae3955ac151bbada, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/e1584d0957c447b1addbfe77b0736374, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/6323632cd2f144f08fd0c063c19b00bc, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/2e26aa8080a04c32bbb9ce9926619d5f, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/c62a4df7ca9048a2bcd68bb5dbeec7f6, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/87d6bd9816664ccdb50a52552e6d6307, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/35b36f8249484a28ae535aa9da8fbcb5, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/2eec791d21bf43d99f7320745dc7525a, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/623d4dad09c541938944c9fe83eae190, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/f7582870620d49859863e99508ea2e85, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/355f0cabb13b4a4c8c8a5dff7f8462d8, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/8487f9d6068640dfa0a7c7ef61fb2526, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/351d6c558162439389e2e4f28f54b61d, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/82239fade5d943fdbc9a8795286a63b8, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/2125abb4d8174438b132c627622330ae, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/0b42a0418e274751b3729ad39d958bf1, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/e98e13b4f2804680a194cebc2c77da26] to archive 2024-12-11T04:28:20,121 DEBUG [StoreCloser-TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-11T04:28:20,122 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/06380b79b0874ac8b2c49d1e31bcc466 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/06380b79b0874ac8b2c49d1e31bcc466 2024-12-11T04:28:20,122 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/6323632cd2f144f08fd0c063c19b00bc to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/6323632cd2f144f08fd0c063c19b00bc 2024-12-11T04:28:20,122 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/77d6f8db740a46639986b6f181caa194 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/77d6f8db740a46639986b6f181caa194 2024-12-11T04:28:20,122 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/35dec10af7ef45888bca493a4aa46a4f to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/35dec10af7ef45888bca493a4aa46a4f 2024-12-11T04:28:20,123 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/0c85f7bf5b9d4f5fae3955ac151bbada to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/0c85f7bf5b9d4f5fae3955ac151bbada 2024-12-11T04:28:20,123 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/2e26aa8080a04c32bbb9ce9926619d5f to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/2e26aa8080a04c32bbb9ce9926619d5f 2024-12-11T04:28:20,123 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/c62a4df7ca9048a2bcd68bb5dbeec7f6 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/c62a4df7ca9048a2bcd68bb5dbeec7f6 2024-12-11T04:28:20,123 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/e1584d0957c447b1addbfe77b0736374 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/e1584d0957c447b1addbfe77b0736374 2024-12-11T04:28:20,124 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/87d6bd9816664ccdb50a52552e6d6307 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/87d6bd9816664ccdb50a52552e6d6307 2024-12-11T04:28:20,124 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/2eec791d21bf43d99f7320745dc7525a to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/2eec791d21bf43d99f7320745dc7525a 2024-12-11T04:28:20,124 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/623d4dad09c541938944c9fe83eae190 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/623d4dad09c541938944c9fe83eae190 2024-12-11T04:28:20,124 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/f7582870620d49859863e99508ea2e85 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/f7582870620d49859863e99508ea2e85 2024-12-11T04:28:20,124 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/355f0cabb13b4a4c8c8a5dff7f8462d8 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/355f0cabb13b4a4c8c8a5dff7f8462d8 2024-12-11T04:28:20,125 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/35b36f8249484a28ae535aa9da8fbcb5 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/35b36f8249484a28ae535aa9da8fbcb5 2024-12-11T04:28:20,125 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/8487f9d6068640dfa0a7c7ef61fb2526 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/8487f9d6068640dfa0a7c7ef61fb2526 2024-12-11T04:28:20,126 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/351d6c558162439389e2e4f28f54b61d to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/351d6c558162439389e2e4f28f54b61d 2024-12-11T04:28:20,126 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/2125abb4d8174438b132c627622330ae to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/2125abb4d8174438b132c627622330ae 2024-12-11T04:28:20,126 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/82239fade5d943fdbc9a8795286a63b8 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/82239fade5d943fdbc9a8795286a63b8 2024-12-11T04:28:20,126 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/0b42a0418e274751b3729ad39d958bf1 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/0b42a0418e274751b3729ad39d958bf1 2024-12-11T04:28:20,126 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/e98e13b4f2804680a194cebc2c77da26 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/e98e13b4f2804680a194cebc2c77da26 2024-12-11T04:28:20,127 DEBUG [StoreCloser-TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/71dc7e3d953b4e90b171cf1236c15b3f, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/587b0526e2f343f6a8ffc3f7116aa483, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/ec80a362ae4147cfb108d1aec3152e4d, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/aa56048ab6b243bfa39e00add912f90d, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/021c3aedea0a419884dc630093177cae, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/f9e098f42b8a4c44ba2931b332bc10c0, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/a9c41982f63d4648bc00d7c518e8b948, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/1a709d26b2274678bf85c48552adeb7c, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/37214230ddac46208fab5c8373bc43d6, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/bd53271219954aaca960b053d019ed3e, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/6faa36ba75684afb9f6bc41ae12d702f, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/94399002f5a1455f87278a3641c6e045, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/e516fa16935142f5bf3db19a8f04066e, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/8038bb22b20f4744a2d5ab7b43ba97d9, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/b63cb9d647524ae7a8f586494e1a9909, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/c6e1fdfc27164653a88e30a11ec2e6d6, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/fb98429873b2411594c961ae5bcc06e3, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/d9d8a2152d2149eca6aa222ee48ecc5d, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/63dc56f667aa409aa64c530c85aee6c1, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/7de60f57e7f04e948b064295390bf635] to archive 2024-12-11T04:28:20,127 DEBUG [StoreCloser-TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-11T04:28:20,129 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/587b0526e2f343f6a8ffc3f7116aa483 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/587b0526e2f343f6a8ffc3f7116aa483 2024-12-11T04:28:20,130 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/71dc7e3d953b4e90b171cf1236c15b3f to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/71dc7e3d953b4e90b171cf1236c15b3f 2024-12-11T04:28:20,130 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/aa56048ab6b243bfa39e00add912f90d to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/aa56048ab6b243bfa39e00add912f90d 2024-12-11T04:28:20,130 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/021c3aedea0a419884dc630093177cae to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/021c3aedea0a419884dc630093177cae 2024-12-11T04:28:20,130 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/ec80a362ae4147cfb108d1aec3152e4d to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/ec80a362ae4147cfb108d1aec3152e4d 2024-12-11T04:28:20,130 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/1a709d26b2274678bf85c48552adeb7c to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/1a709d26b2274678bf85c48552adeb7c 2024-12-11T04:28:20,130 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/f9e098f42b8a4c44ba2931b332bc10c0 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/f9e098f42b8a4c44ba2931b332bc10c0 2024-12-11T04:28:20,130 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/a9c41982f63d4648bc00d7c518e8b948 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/a9c41982f63d4648bc00d7c518e8b948 2024-12-11T04:28:20,131 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/bd53271219954aaca960b053d019ed3e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/bd53271219954aaca960b053d019ed3e 2024-12-11T04:28:20,132 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/37214230ddac46208fab5c8373bc43d6 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/37214230ddac46208fab5c8373bc43d6 2024-12-11T04:28:20,132 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/6faa36ba75684afb9f6bc41ae12d702f to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/6faa36ba75684afb9f6bc41ae12d702f 2024-12-11T04:28:20,132 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/b63cb9d647524ae7a8f586494e1a9909 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/b63cb9d647524ae7a8f586494e1a9909 2024-12-11T04:28:20,132 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/94399002f5a1455f87278a3641c6e045 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/94399002f5a1455f87278a3641c6e045 2024-12-11T04:28:20,132 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/8038bb22b20f4744a2d5ab7b43ba97d9 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/8038bb22b20f4744a2d5ab7b43ba97d9 2024-12-11T04:28:20,132 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/e516fa16935142f5bf3db19a8f04066e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/e516fa16935142f5bf3db19a8f04066e 2024-12-11T04:28:20,132 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/c6e1fdfc27164653a88e30a11ec2e6d6 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/c6e1fdfc27164653a88e30a11ec2e6d6 2024-12-11T04:28:20,133 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/fb98429873b2411594c961ae5bcc06e3 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/fb98429873b2411594c961ae5bcc06e3 2024-12-11T04:28:20,133 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/d9d8a2152d2149eca6aa222ee48ecc5d to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/d9d8a2152d2149eca6aa222ee48ecc5d 2024-12-11T04:28:20,133 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/7de60f57e7f04e948b064295390bf635 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/7de60f57e7f04e948b064295390bf635 2024-12-11T04:28:20,133 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/63dc56f667aa409aa64c530c85aee6c1 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/63dc56f667aa409aa64c530c85aee6c1 2024-12-11T04:28:20,136 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/recovered.edits/368.seqid, newMaxSeqId=368, maxSeqId=4 2024-12-11T04:28:20,137 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393. 2024-12-11T04:28:20,137 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1635): Region close journal for 9dbedb23b89b4496aba995340251c393: 2024-12-11T04:28:20,138 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] handler.UnassignRegionHandler(170): Closed 9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:20,139 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=132 updating hbase:meta row=9dbedb23b89b4496aba995340251c393, regionState=CLOSED 2024-12-11T04:28:20,140 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-12-11T04:28:20,140 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; CloseRegionProcedure 9dbedb23b89b4496aba995340251c393, server=5f466b3719ec,39071,1733891180267 in 2.0510 sec 2024-12-11T04:28:20,141 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=132, resume processing ppid=131 2024-12-11T04:28:20,141 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, ppid=131, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=9dbedb23b89b4496aba995340251c393, UNASSIGN in 2.0540 sec 2024-12-11T04:28:20,143 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-12-11T04:28:20,143 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 2.0570 sec 2024-12-11T04:28:20,143 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733891300143"}]},"ts":"1733891300143"} 2024-12-11T04:28:20,144 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-11T04:28:20,146 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set 
TestAcidGuarantees to state=DISABLED 2024-12-11T04:28:20,147 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.0700 sec 2024-12-11T04:28:20,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-11T04:28:20,184 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 130 completed 2024-12-11T04:28:20,184 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-11T04:28:20,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=134, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T04:28:20,185 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=134, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T04:28:20,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-11T04:28:20,186 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=134, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T04:28:20,187 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:20,188 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A, FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B, FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C, FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/recovered.edits] 2024-12-11T04:28:20,191 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/60c7db2a758a41feb52065ab7a399cf5 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/60c7db2a758a41feb52065ab7a399cf5 2024-12-11T04:28:20,191 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/03148b3d9e57492986ee872f755bfb62 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/03148b3d9e57492986ee872f755bfb62 
2024-12-11T04:28:20,191 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/4dcc1427d918493b875ab9bab3b4acf5 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/4dcc1427d918493b875ab9bab3b4acf5 2024-12-11T04:28:20,191 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/68e7a8846bbc4d839bfdeaa458d37e70 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/68e7a8846bbc4d839bfdeaa458d37e70 2024-12-11T04:28:20,191 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/f73b3952926b4614ae59c1e514ad2f35 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/A/f73b3952926b4614ae59c1e514ad2f35 2024-12-11T04:28:20,194 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/5d5f74e8ed444156a816be9923067101 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/5d5f74e8ed444156a816be9923067101 2024-12-11T04:28:20,194 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/6d2c5526949a4828999dc7edea8651a1 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/6d2c5526949a4828999dc7edea8651a1 2024-12-11T04:28:20,194 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/de436bee2e1044c3b4ee8db1207ea191 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/de436bee2e1044c3b4ee8db1207ea191 2024-12-11T04:28:20,194 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/9fe7a352ba5c4b7bbbe54c093af6afe2 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/9fe7a352ba5c4b7bbbe54c093af6afe2 2024-12-11T04:28:20,194 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/9b59497e70b7442cbebf82a30ed7df66 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/B/9b59497e70b7442cbebf82a30ed7df66 2024-12-11T04:28:20,197 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/ba51e08c3e0b4b24a4cea2086bd845a6 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/ba51e08c3e0b4b24a4cea2086bd845a6 2024-12-11T04:28:20,197 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/0de29be8b15141d4814a08e830d0f423 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/0de29be8b15141d4814a08e830d0f423 2024-12-11T04:28:20,197 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/cfbd7473bffb469581db4f598cbbf149 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/cfbd7473bffb469581db4f598cbbf149 2024-12-11T04:28:20,197 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/d3573c4579ee4c8f868c5e1a348ead1b to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/d3573c4579ee4c8f868c5e1a348ead1b 2024-12-11T04:28:20,197 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/de0f70e906f244388a7c9d12edfcb003 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/C/de0f70e906f244388a7c9d12edfcb003 2024-12-11T04:28:20,199 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/recovered.edits/368.seqid to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393/recovered.edits/368.seqid 2024-12-11T04:28:20,200 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:20,200 DEBUG [PEWorker-5 {}] 
procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-11T04:28:20,200 DEBUG [PEWorker-5 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-11T04:28:20,201 DEBUG [PEWorker-5 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-11T04:28:20,206 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121121c5fa36fc53404cb4ce4dba4112bea6_9dbedb23b89b4496aba995340251c393 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121121c5fa36fc53404cb4ce4dba4112bea6_9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:20,207 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412110e1543bd40a5461ebace53e01abd5283_9dbedb23b89b4496aba995340251c393 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412110e1543bd40a5461ebace53e01abd5283_9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:20,207 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121123356c4b656e414d8cde8a3472353781_9dbedb23b89b4496aba995340251c393 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121123356c4b656e414d8cde8a3472353781_9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:20,207 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412114348cd7ac35545fb9f8bf2084bed4dce_9dbedb23b89b4496aba995340251c393 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412114348cd7ac35545fb9f8bf2084bed4dce_9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:20,207 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412115e9c69027b594a94900ffd468e5567fb_9dbedb23b89b4496aba995340251c393 to 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412115e9c69027b594a94900ffd468e5567fb_9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:20,207 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412116f6dd09893044921bde4464503301cd4_9dbedb23b89b4496aba995340251c393 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412116f6dd09893044921bde4464503301cd4_9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:20,207 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121164689ff70dac420fba37ca1693f427f6_9dbedb23b89b4496aba995340251c393 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121164689ff70dac420fba37ca1693f427f6_9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:20,207 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412117e7b7ae515cd44e291e5ea5e5e3ff792_9dbedb23b89b4496aba995340251c393 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412117e7b7ae515cd44e291e5ea5e5e3ff792_9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:20,208 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121180f2f1d0905c4f77a3ad3d9d96038eb2_9dbedb23b89b4496aba995340251c393 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121180f2f1d0905c4f77a3ad3d9d96038eb2_9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:20,208 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211b0c85f5a0ec1476e97af54a49e07c4f4_9dbedb23b89b4496aba995340251c393 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211b0c85f5a0ec1476e97af54a49e07c4f4_9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:20,208 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412118c9aa60b13724fbc9a55cdca9294eb46_9dbedb23b89b4496aba995340251c393 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412118c9aa60b13724fbc9a55cdca9294eb46_9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:20,208 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121181b71790a5e54e3cb2016ed467944621_9dbedb23b89b4496aba995340251c393 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121181b71790a5e54e3cb2016ed467944621_9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:20,208 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211bd589bfc3867499ea3ee19badaa15bad_9dbedb23b89b4496aba995340251c393 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211bd589bfc3867499ea3ee19badaa15bad_9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:20,208 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211b7fd102a5caf424ba39d260b26dc49fc_9dbedb23b89b4496aba995340251c393 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211b7fd102a5caf424ba39d260b26dc49fc_9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:20,209 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211caf4f98c509b4e1da1f2944e88291a90_9dbedb23b89b4496aba995340251c393 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211caf4f98c509b4e1da1f2944e88291a90_9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:20,209 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211df253c39a4bc49818e43a60baca16bdb_9dbedb23b89b4496aba995340251c393 to 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211df253c39a4bc49818e43a60baca16bdb_9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:20,209 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211e29baae971714fbeb29ad2a81dd07d84_9dbedb23b89b4496aba995340251c393 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211e29baae971714fbeb29ad2a81dd07d84_9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:20,209 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211e47378d56a6c4124ada9c21b95be582d_9dbedb23b89b4496aba995340251c393 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211e47378d56a6c4124ada9c21b95be582d_9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:20,209 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211ef31453777ed4bac826237163f958859_9dbedb23b89b4496aba995340251c393 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211ef31453777ed4bac826237163f958859_9dbedb23b89b4496aba995340251c393 2024-12-11T04:28:20,210 DEBUG [PEWorker-5 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-11T04:28:20,212 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=134, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T04:28:20,213 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-11T04:28:20,214 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-11T04:28:20,215 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=134, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T04:28:20,215 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 
2024-12-11T04:28:20,215 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733891300215"}]},"ts":"9223372036854775807"} 2024-12-11T04:28:20,216 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-11T04:28:20,216 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 9dbedb23b89b4496aba995340251c393, NAME => 'TestAcidGuarantees,,1733891273240.9dbedb23b89b4496aba995340251c393.', STARTKEY => '', ENDKEY => ''}] 2024-12-11T04:28:20,216 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 2024-12-11T04:28:20,217 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733891300216"}]},"ts":"9223372036854775807"} 2024-12-11T04:28:20,218 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-11T04:28:20,220 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=134, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T04:28:20,220 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 36 msec 2024-12-11T04:28:20,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-11T04:28:20,286 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 134 completed 2024-12-11T04:28:20,295 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=246 (was 246), OpenFileDescriptor=461 (was 453) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=379 (was 398), ProcessCount=11 (was 11), AvailableMemoryMB=3522 (was 3546) 2024-12-11T04:28:20,303 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=246, OpenFileDescriptor=461, MaxFileDescriptor=1048576, SystemLoadAverage=379, ProcessCount=11, AvailableMemoryMB=3522 2024-12-11T04:28:20,305 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-12-11T04:28:20,305 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T04:28:20,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=135, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-11T04:28:20,307 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=135, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T04:28:20,307 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:20,307 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 135 2024-12-11T04:28:20,307 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=135, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T04:28:20,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=135 2024-12-11T04:28:20,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742301_1477 (size=963) 2024-12-11T04:28:20,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=135 2024-12-11T04:28:20,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=135 2024-12-11T04:28:20,713 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5 2024-12-11T04:28:20,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742302_1478 (size=53) 2024-12-11T04:28:20,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=135 2024-12-11T04:28:21,119 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T04:28:21,119 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing c171d7ccfa412c571490e92799f7df2c, disabling compactions & flushes 2024-12-11T04:28:21,119 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:21,119 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:21,119 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. after waiting 0 ms 2024-12-11T04:28:21,119 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:21,119 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 
2024-12-11T04:28:21,119 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:21,120 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=135, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T04:28:21,121 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733891301120"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733891301120"}]},"ts":"1733891301120"} 2024-12-11T04:28:21,122 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-11T04:28:21,122 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=135, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T04:28:21,122 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733891301122"}]},"ts":"1733891301122"} 2024-12-11T04:28:21,123 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-11T04:28:21,127 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=136, ppid=135, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=c171d7ccfa412c571490e92799f7df2c, ASSIGN}] 2024-12-11T04:28:21,127 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=136, ppid=135, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=c171d7ccfa412c571490e92799f7df2c, ASSIGN 2024-12-11T04:28:21,128 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=136, ppid=135, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=c171d7ccfa412c571490e92799f7df2c, ASSIGN; state=OFFLINE, location=5f466b3719ec,39071,1733891180267; forceNewPlan=false, retain=false 2024-12-11T04:28:21,278 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=136 updating hbase:meta row=c171d7ccfa412c571490e92799f7df2c, regionState=OPENING, regionLocation=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:21,279 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE; OpenRegionProcedure c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267}] 2024-12-11T04:28:21,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=135 2024-12-11T04:28:21,431 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:21,433 INFO [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 
2024-12-11T04:28:21,433 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] regionserver.HRegion(7285): Opening region: {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} 2024-12-11T04:28:21,434 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees c171d7ccfa412c571490e92799f7df2c 2024-12-11T04:28:21,434 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T04:28:21,434 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] regionserver.HRegion(7327): checking encryption for c171d7ccfa412c571490e92799f7df2c 2024-12-11T04:28:21,434 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] regionserver.HRegion(7330): checking classloading for c171d7ccfa412c571490e92799f7df2c 2024-12-11T04:28:21,435 INFO [StoreOpener-c171d7ccfa412c571490e92799f7df2c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region c171d7ccfa412c571490e92799f7df2c 2024-12-11T04:28:21,436 INFO [StoreOpener-c171d7ccfa412c571490e92799f7df2c-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T04:28:21,436 INFO [StoreOpener-c171d7ccfa412c571490e92799f7df2c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c171d7ccfa412c571490e92799f7df2c columnFamilyName A 2024-12-11T04:28:21,436 DEBUG [StoreOpener-c171d7ccfa412c571490e92799f7df2c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:21,436 INFO [StoreOpener-c171d7ccfa412c571490e92799f7df2c-1 {}] regionserver.HStore(327): Store=c171d7ccfa412c571490e92799f7df2c/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T04:28:21,437 INFO [StoreOpener-c171d7ccfa412c571490e92799f7df2c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region c171d7ccfa412c571490e92799f7df2c 2024-12-11T04:28:21,437 INFO [StoreOpener-c171d7ccfa412c571490e92799f7df2c-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T04:28:21,438 INFO [StoreOpener-c171d7ccfa412c571490e92799f7df2c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c171d7ccfa412c571490e92799f7df2c columnFamilyName B 2024-12-11T04:28:21,438 DEBUG [StoreOpener-c171d7ccfa412c571490e92799f7df2c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:21,438 INFO [StoreOpener-c171d7ccfa412c571490e92799f7df2c-1 {}] regionserver.HStore(327): Store=c171d7ccfa412c571490e92799f7df2c/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T04:28:21,438 INFO [StoreOpener-c171d7ccfa412c571490e92799f7df2c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region c171d7ccfa412c571490e92799f7df2c 2024-12-11T04:28:21,439 INFO [StoreOpener-c171d7ccfa412c571490e92799f7df2c-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T04:28:21,439 INFO [StoreOpener-c171d7ccfa412c571490e92799f7df2c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c171d7ccfa412c571490e92799f7df2c columnFamilyName C 2024-12-11T04:28:21,439 DEBUG [StoreOpener-c171d7ccfa412c571490e92799f7df2c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:21,439 INFO [StoreOpener-c171d7ccfa412c571490e92799f7df2c-1 {}] regionserver.HStore(327): Store=c171d7ccfa412c571490e92799f7df2c/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T04:28:21,440 INFO [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:21,440 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c 2024-12-11T04:28:21,440 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c 2024-12-11T04:28:21,441 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-11T04:28:21,442 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] regionserver.HRegion(1085): writing seq id for c171d7ccfa412c571490e92799f7df2c 2024-12-11T04:28:21,443 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T04:28:21,444 INFO [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] regionserver.HRegion(1102): Opened c171d7ccfa412c571490e92799f7df2c; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66364584, jitterRate=-0.011090636253356934}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-11T04:28:21,444 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] regionserver.HRegion(1001): Region open journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:21,445 INFO [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c., pid=137, masterSystemTime=1733891301431 2024-12-11T04:28:21,446 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:21,446 INFO [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=137}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 
2024-12-11T04:28:21,446 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=136 updating hbase:meta row=c171d7ccfa412c571490e92799f7df2c, regionState=OPEN, openSeqNum=2, regionLocation=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:21,448 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-12-11T04:28:21,448 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; OpenRegionProcedure c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 in 168 msec 2024-12-11T04:28:21,449 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=136, resume processing ppid=135 2024-12-11T04:28:21,449 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, ppid=135, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=c171d7ccfa412c571490e92799f7df2c, ASSIGN in 321 msec 2024-12-11T04:28:21,450 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=135, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T04:28:21,450 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733891301450"}]},"ts":"1733891301450"} 2024-12-11T04:28:21,450 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-11T04:28:21,453 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=135, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T04:28:21,453 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1480 sec 2024-12-11T04:28:22,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=135 2024-12-11T04:28:22,411 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 135 completed 2024-12-11T04:28:22,412 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7d0ab200 to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@32bb71c 2024-12-11T04:28:22,415 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@de9f076, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:28:22,417 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:28:22,418 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49780, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:28:22,418 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-11T04:28:22,419 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58476, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-11T04:28:22,421 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5871c039 to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6bc0f7c 2024-12-11T04:28:22,423 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4414259d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:28:22,424 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7daa5922 to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1b8b6e04 2024-12-11T04:28:22,427 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ed69825, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:28:22,427 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1b7f20c4 to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5bc486e1 2024-12-11T04:28:22,430 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11193a0c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:28:22,431 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5f7c40ba to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2070263a 2024-12-11T04:28:22,433 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7861b162, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:28:22,434 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x41b0e7b6 to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6050584c 2024-12-11T04:28:22,437 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@154f0f85, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:28:22,438 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6dd48863 to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@8a917b 2024-12-11T04:28:22,442 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3652e74d, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:28:22,442 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x51196534 to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@54c2725 2024-12-11T04:28:22,445 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2405c04e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:28:22,446 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1dc5e114 to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@79d49886 2024-12-11T04:28:22,449 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73d92042, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:28:22,450 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3e96b8ad to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@635b1751 2024-12-11T04:28:22,455 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@593af048, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:28:22,456 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x17e5a47d to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2cbfd84f 2024-12-11T04:28:22,464 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2209c520, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:28:22,470 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:28:22,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees 2024-12-11T04:28:22,472 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:28:22,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-11T04:28:22,472 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=138, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:28:22,472 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:28:22,478 DEBUG [hconnection-0x274704ac-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:28:22,478 DEBUG [hconnection-0x7354a98d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:28:22,479 DEBUG [hconnection-0x5d04b533-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:28:22,479 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49806, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:28:22,479 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49784, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:28:22,479 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49790, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:28:22,480 DEBUG [hconnection-0x3b89f49-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:28:22,480 DEBUG [hconnection-0x1e53536f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:28:22,481 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49810, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:28:22,481 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49824, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:28:22,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on c171d7ccfa412c571490e92799f7df2c 2024-12-11T04:28:22,485 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c171d7ccfa412c571490e92799f7df2c 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-11T04:28:22,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=A 2024-12-11T04:28:22,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:22,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=B 2024-12-11T04:28:22,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:22,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=C 2024-12-11T04:28:22,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:22,487 
DEBUG [hconnection-0x1dc778a0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:28:22,488 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49830, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:28:22,488 DEBUG [hconnection-0x24921ad3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:28:22,488 DEBUG [hconnection-0x57e7b328-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:28:22,489 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49838, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:28:22,489 DEBUG [hconnection-0x38b800fc-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:28:22,489 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49840, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:28:22,491 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49854, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:28:22,495 DEBUG [hconnection-0x22ac90e2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:28:22,496 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49870, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:28:22,503 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:22,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891362501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:22,503 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:22,503 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:22,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891362502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:22,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891362502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:22,504 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:22,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891362503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:22,504 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:22,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891362504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:22,511 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/17147dc5f3ef4e5d92fab965424068dc is 50, key is test_row_0/A:col10/1733891302484/Put/seqid=0 2024-12-11T04:28:22,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742303_1479 (size=12001) 2024-12-11T04:28:22,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-11T04:28:22,607 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:22,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891362605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:22,607 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:22,607 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:22,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891362605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:22,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891362605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:22,607 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:22,607 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:22,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891362605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:22,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891362605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:22,624 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:22,624 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-11T04:28:22,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:22,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:22,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:22,624 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:22,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:22,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:22,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-11T04:28:22,776 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:22,776 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-11T04:28:22,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:22,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:22,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:22,777 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:22,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:22,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:22,811 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:22,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891362808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:22,811 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:22,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891362808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:22,811 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:22,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891362808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:22,812 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:22,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891362809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:22,812 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:22,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891362809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:22,919 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/17147dc5f3ef4e5d92fab965424068dc 2024-12-11T04:28:22,929 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:22,929 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-11T04:28:22,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:22,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:22,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 
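[Editor's note, not part of the log] The warnings above show the region server rejecting Mutate calls with RegionTooBusyException because the memstore of region c171d7ccfa412c571490e92799f7df2c is over its 512.0 K blocking limit while the flush is still in progress. For orientation only, here is a minimal client-side sketch of the kind of write being rejected and how a caller might back off on that exception. The table name, column family "A", qualifier "col10" and row key are taken from the log; the class name, retry budget, sleep interval and value payload are illustrative assumptions, not the actual TestAcidGuarantees code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      // Column family "A", qualifier "col10", matching the HFile key seen in the flush above.
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 0; attempt < 5; attempt++) {   // retry budget is an assumption
        try {
          table.put(put);                               // may throw RegionTooBusyException
          break;
        } catch (RegionTooBusyException busy) {
          // Region is over its memstore blocking limit; wait for the flush to drain and retry.
          Thread.sleep(100L * (attempt + 1));
        }
      }
    }
  }
}

In a real client the built-in retry logic normally absorbs these exceptions transparently; the explicit loop here only makes the exception path visible.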
2024-12-11T04:28:22,930 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:22,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:22,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:22,943 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/e11d72139a1f4fb5bd13447e502d0919 is 50, key is test_row_0/B:col10/1733891302484/Put/seqid=0 2024-12-11T04:28:22,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742304_1480 (size=12001) 2024-12-11T04:28:23,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-11T04:28:23,082 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:23,082 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-11T04:28:23,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:23,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:23,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 
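[Editor's note, not part of the log] The flush that keeps being re-dispatched above (pid=139, child of FlushTableProcedure pid=138) was started by an admin flush request against the whole table; the client polls the master ("Checking to see if procedure is done pid=138") while the region server reports "already flushing" back as a RemoteProcedureException, so the master re-sends the FlushRegionProcedure until the in-flight flush completes. A caller would typically trigger the same operation with the Admin API, as in the sketch below; the table name comes from the log, the class name is arbitrary, and error handling and any follow-up verification are omitted.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table, which is what drives
      // the FlushTableProcedure / FlushRegionProcedure pair seen in this log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}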
2024-12-11T04:28:23,082 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:23,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:23,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:23,114 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:23,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891363112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:23,116 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:23,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891363113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:23,117 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:23,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891363113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:23,117 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:23,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891363114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:23,117 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:23,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891363114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:23,234 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:23,235 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-11T04:28:23,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:23,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:23,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:23,235 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:23,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:23,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:23,348 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/e11d72139a1f4fb5bd13447e502d0919 2024-12-11T04:28:23,369 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/06c0704f1031404f8ab19fd4dc28ac2c is 50, key is test_row_0/C:col10/1733891302484/Put/seqid=0 2024-12-11T04:28:23,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742305_1481 (size=12001) 2024-12-11T04:28:23,387 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:23,387 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-11T04:28:23,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:23,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 
as already flushing 2024-12-11T04:28:23,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:23,388 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:23,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:23,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:23,539 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:23,540 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-11T04:28:23,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:23,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:23,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:23,540 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:23,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:23,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:23,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-11T04:28:23,621 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:23,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891363618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:23,622 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:23,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891363619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:23,623 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:23,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891363620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:23,623 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:23,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891363621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:23,626 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:23,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891363622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:23,692 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:23,692 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-11T04:28:23,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:23,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:23,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:23,693 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:23,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:23,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:23,773 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/06c0704f1031404f8ab19fd4dc28ac2c 2024-12-11T04:28:23,777 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/17147dc5f3ef4e5d92fab965424068dc as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/17147dc5f3ef4e5d92fab965424068dc 2024-12-11T04:28:23,784 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/17147dc5f3ef4e5d92fab965424068dc, entries=150, sequenceid=12, filesize=11.7 K 2024-12-11T04:28:23,785 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/e11d72139a1f4fb5bd13447e502d0919 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/e11d72139a1f4fb5bd13447e502d0919 2024-12-11T04:28:23,789 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/e11d72139a1f4fb5bd13447e502d0919, entries=150, sequenceid=12, 
filesize=11.7 K 2024-12-11T04:28:23,790 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/06c0704f1031404f8ab19fd4dc28ac2c as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/06c0704f1031404f8ab19fd4dc28ac2c 2024-12-11T04:28:23,793 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/06c0704f1031404f8ab19fd4dc28ac2c, entries=150, sequenceid=12, filesize=11.7 K 2024-12-11T04:28:23,794 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for c171d7ccfa412c571490e92799f7df2c in 1309ms, sequenceid=12, compaction requested=false 2024-12-11T04:28:23,794 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:23,844 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:23,845 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-11T04:28:23,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 
2024-12-11T04:28:23,845 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2837): Flushing c171d7ccfa412c571490e92799f7df2c 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-11T04:28:23,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=A 2024-12-11T04:28:23,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:23,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=B 2024-12-11T04:28:23,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:23,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=C 2024-12-11T04:28:23,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:23,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/fd85af27b0064e55a34f7d9213c5d1cf is 50, key is test_row_0/A:col10/1733891302493/Put/seqid=0 2024-12-11T04:28:23,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742306_1482 (size=12001) 2024-12-11T04:28:23,857 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/fd85af27b0064e55a34f7d9213c5d1cf 2024-12-11T04:28:23,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/a7dc37b01c1846b6a71b19903b91ebd3 is 50, key is test_row_0/B:col10/1733891302493/Put/seqid=0 2024-12-11T04:28:23,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742307_1483 (size=12001) 2024-12-11T04:28:24,273 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=38 (bloomFilter=true), 
to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/a7dc37b01c1846b6a71b19903b91ebd3 2024-12-11T04:28:24,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/09668b35a13a43c39949da2e23b4a2b0 is 50, key is test_row_0/C:col10/1733891302493/Put/seqid=0 2024-12-11T04:28:24,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742308_1484 (size=12001) 2024-12-11T04:28:24,288 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/09668b35a13a43c39949da2e23b4a2b0 2024-12-11T04:28:24,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/fd85af27b0064e55a34f7d9213c5d1cf as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/fd85af27b0064e55a34f7d9213c5d1cf 2024-12-11T04:28:24,295 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/fd85af27b0064e55a34f7d9213c5d1cf, entries=150, sequenceid=38, filesize=11.7 K 2024-12-11T04:28:24,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/a7dc37b01c1846b6a71b19903b91ebd3 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/a7dc37b01c1846b6a71b19903b91ebd3 2024-12-11T04:28:24,300 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/a7dc37b01c1846b6a71b19903b91ebd3, entries=150, sequenceid=38, filesize=11.7 K 2024-12-11T04:28:24,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/09668b35a13a43c39949da2e23b4a2b0 as 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/09668b35a13a43c39949da2e23b4a2b0 2024-12-11T04:28:24,306 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/09668b35a13a43c39949da2e23b4a2b0, entries=150, sequenceid=38, filesize=11.7 K 2024-12-11T04:28:24,306 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=0 B/0 for c171d7ccfa412c571490e92799f7df2c in 461ms, sequenceid=38, compaction requested=false 2024-12-11T04:28:24,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2538): Flush status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:24,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:24,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-12-11T04:28:24,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=139 2024-12-11T04:28:24,311 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-12-11T04:28:24,311 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8360 sec 2024-12-11T04:28:24,313 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees in 1.8410 sec 2024-12-11T04:28:24,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-11T04:28:24,576 INFO [Thread-2131 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 138 completed 2024-12-11T04:28:24,577 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:28:24,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees 2024-12-11T04:28:24,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-11T04:28:24,578 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:28:24,579 INFO [PEWorker-5 {}] 
procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:28:24,579 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:28:24,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on c171d7ccfa412c571490e92799f7df2c 2024-12-11T04:28:24,636 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c171d7ccfa412c571490e92799f7df2c 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-11T04:28:24,637 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=A 2024-12-11T04:28:24,637 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:24,637 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=B 2024-12-11T04:28:24,637 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:24,637 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=C 2024-12-11T04:28:24,637 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:24,641 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/b3e1e62199d14af28c42f83ae9fa1ec9 is 50, key is test_row_0/A:col10/1733891304636/Put/seqid=0 2024-12-11T04:28:24,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742309_1485 (size=16681) 2024-12-11T04:28:24,649 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/b3e1e62199d14af28c42f83ae9fa1ec9 2024-12-11T04:28:24,654 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/30c6c3dc44884d9d80977cc5e5ebe8e0 is 50, key is test_row_0/B:col10/1733891304636/Put/seqid=0 2024-12-11T04:28:24,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742310_1486 (size=12001) 2024-12-11T04:28:24,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-11T04:28:24,697 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:24,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891364691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:24,699 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:24,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891364692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:24,700 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:24,700 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:24,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891364692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:24,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891364693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:24,700 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:24,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891364693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:24,730 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:24,730 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-11T04:28:24,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:24,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:24,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:24,731 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
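
The repeated RegionTooBusyException warnings above ("Over memstore limit=512.0 K") come from HRegion.checkResources(): once a region's combined memstore size passes its blocking limit (the per-region flush size times the block multiplier), incoming mutations are rejected until the in-flight flush drains the memstore, and the client is expected to retry. A minimal sketch of the configuration knobs involved is below; the concrete values are illustrative assumptions, not the settings used by this test run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingConfig {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Server side: updates are blocked once a region's memstore reaches
        // flush.size * block.multiplier. A 512 KB limit, as in the log above,
        // would correspond to e.g. a 128 KB flush size with a multiplier of 4
        // (an assumption for illustration only).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        // Client side: RegionTooBusyException is retried with backoff; these
        // settings bound how long a put keeps retrying before failing out.
        conf.setInt("hbase.client.retries.number", 15);
        conf.setLong("hbase.client.pause", 100); // ms between retries
        long limitKb = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 0) / 1024;
        System.out.println("blocking limit = " + limitKb + " KB");
      }
    }
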
2024-12-11T04:28:24,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:24,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:24,801 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:24,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891364798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:24,802 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:24,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891364801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:24,802 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:24,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891364801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:24,803 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:24,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891364801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:24,803 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:24,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891364801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:24,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-11T04:28:24,882 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:24,883 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-11T04:28:24,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:24,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:24,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:24,883 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
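
The FlushTableProcedure/FlushRegionProcedure pairs in this run (pid=138/139 completed above, pid=140/141 currently being re-dispatched because the region reports "already flushing") are driven by an administrative flush from the test client, visible in the "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" and "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 138 completed" lines. A minimal sketch of how such a flush is requested through the public client API is below, assuming a running cluster reachable through the default configuration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Asks the master to flush the table and waits for completion, which
          // is what the client-side "Operation: FLUSH ... procId: ... completed"
          // line above corresponds to.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }
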
2024-12-11T04:28:24,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:24,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:25,005 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:25,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891365002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:25,006 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:25,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891365004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:25,008 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:25,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891365004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:25,008 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:25,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891365005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:25,008 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:25,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891365005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:25,035 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:25,035 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-11T04:28:25,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:25,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:25,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:25,036 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
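
The CompactingMemStore and CompactionPipeline messages in the flush entries earlier in this run ("FLUSHING TO DISK ... store=A/B/C", "Swapping pipeline suffix") indicate that the table's column families are backed by a compacting memstore, i.e. an in-memory compaction policy is in effect rather than the default memstore. A minimal sketch of declaring a table that way through the public API is below; the family name and the BASIC policy are illustrative assumptions, not taken from this test's setup code.

    import java.io.IOException;
    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class InMemoryCompactionTable {
      // Creates a table whose family "A" uses a compacting memstore
      // (BASIC chosen as an example; EAGER and ADAPTIVE also exist).
      static void create(Connection connection) throws IOException {
        try (Admin admin = connection.getAdmin()) {
          admin.createTable(
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                  .setColumnFamily(
                      ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                          .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
                          .build())
                  .build());
        }
      }
    }
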
2024-12-11T04:28:25,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:25,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:25,065 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/30c6c3dc44884d9d80977cc5e5ebe8e0 2024-12-11T04:28:25,072 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/30ba8118544e44408d82836a96afa319 is 50, key is test_row_0/C:col10/1733891304636/Put/seqid=0 2024-12-11T04:28:25,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742311_1487 (size=12001) 2024-12-11T04:28:25,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-11T04:28:25,188 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:25,188 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-11T04:28:25,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:25,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:25,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:25,189 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:25,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:25,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:25,310 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:25,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891365307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:25,313 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:25,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891365309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:25,314 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:25,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891365309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:25,314 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:25,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891365310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:25,315 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:25,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891365310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:25,340 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:25,341 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-11T04:28:25,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:25,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:25,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:25,341 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:28:25,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:25,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:25,477 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/30ba8118544e44408d82836a96afa319 2024-12-11T04:28:25,481 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/b3e1e62199d14af28c42f83ae9fa1ec9 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/b3e1e62199d14af28c42f83ae9fa1ec9 2024-12-11T04:28:25,485 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/b3e1e62199d14af28c42f83ae9fa1ec9, entries=250, sequenceid=51, filesize=16.3 K 2024-12-11T04:28:25,486 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/30c6c3dc44884d9d80977cc5e5ebe8e0 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/30c6c3dc44884d9d80977cc5e5ebe8e0 2024-12-11T04:28:25,489 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/30c6c3dc44884d9d80977cc5e5ebe8e0, entries=150, sequenceid=51, filesize=11.7 K 2024-12-11T04:28:25,490 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/30ba8118544e44408d82836a96afa319 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/30ba8118544e44408d82836a96afa319 2024-12-11T04:28:25,493 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/30ba8118544e44408d82836a96afa319, entries=150, sequenceid=51, filesize=11.7 K 2024-12-11T04:28:25,493 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:25,494 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-11T04:28:25,494 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 
KB/144270 for c171d7ccfa412c571490e92799f7df2c in 858ms, sequenceid=51, compaction requested=true 2024-12-11T04:28:25,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:25,494 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:25,494 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2837): Flushing c171d7ccfa412c571490e92799f7df2c 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-11T04:28:25,494 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:28:25,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=A 2024-12-11T04:28:25,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c171d7ccfa412c571490e92799f7df2c:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:28:25,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:25,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:25,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=B 2024-12-11T04:28:25,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:25,494 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:28:25,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=C 2024-12-11T04:28:25,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:25,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c171d7ccfa412c571490e92799f7df2c:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:28:25,495 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:25,495 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c171d7ccfa412c571490e92799f7df2c:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:28:25,495 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:28:25,496 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:28:25,496 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40683 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:28:25,496 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): c171d7ccfa412c571490e92799f7df2c/B is initiating minor compaction (all files) 2024-12-11T04:28:25,496 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): c171d7ccfa412c571490e92799f7df2c/A is initiating minor compaction (all files) 2024-12-11T04:28:25,496 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c171d7ccfa412c571490e92799f7df2c/B in TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:25,496 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c171d7ccfa412c571490e92799f7df2c/A in TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:25,496 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/17147dc5f3ef4e5d92fab965424068dc, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/fd85af27b0064e55a34f7d9213c5d1cf, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/b3e1e62199d14af28c42f83ae9fa1ec9] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp, totalSize=39.7 K 2024-12-11T04:28:25,496 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/e11d72139a1f4fb5bd13447e502d0919, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/a7dc37b01c1846b6a71b19903b91ebd3, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/30c6c3dc44884d9d80977cc5e5ebe8e0] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp, totalSize=35.2 K 2024-12-11T04:28:25,497 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 17147dc5f3ef4e5d92fab965424068dc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1733891302482 2024-12-11T04:28:25,497 DEBUG 
[RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting e11d72139a1f4fb5bd13447e502d0919, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1733891302482 2024-12-11T04:28:25,497 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting a7dc37b01c1846b6a71b19903b91ebd3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1733891302493 2024-12-11T04:28:25,497 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting fd85af27b0064e55a34f7d9213c5d1cf, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1733891302493 2024-12-11T04:28:25,497 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 30c6c3dc44884d9d80977cc5e5ebe8e0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733891304630 2024-12-11T04:28:25,497 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting b3e1e62199d14af28c42f83ae9fa1ec9, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733891304629 2024-12-11T04:28:25,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/24beb141f8564751bcba3fb688c2e369 is 50, key is test_row_0/A:col10/1733891304692/Put/seqid=0 2024-12-11T04:28:25,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742312_1488 (size=12001) 2024-12-11T04:28:25,513 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c171d7ccfa412c571490e92799f7df2c#B#compaction#409 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:25,513 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/5d238cffce2f431aa40f7464d53cf5ad is 50, key is test_row_0/B:col10/1733891304636/Put/seqid=0 2024-12-11T04:28:25,515 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/24beb141f8564751bcba3fb688c2e369 2024-12-11T04:28:25,516 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c171d7ccfa412c571490e92799f7df2c#A#compaction#410 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:25,516 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/78384c01f5d4424b9048099a0412c324 is 50, key is test_row_0/A:col10/1733891304636/Put/seqid=0 2024-12-11T04:28:25,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/51ec0d62228d4a1cb5d927ad7d0b8494 is 50, key is test_row_0/B:col10/1733891304692/Put/seqid=0 2024-12-11T04:28:25,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742314_1490 (size=12104) 2024-12-11T04:28:25,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742313_1489 (size=12104) 2024-12-11T04:28:25,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742315_1491 (size=12001) 2024-12-11T04:28:25,558 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/51ec0d62228d4a1cb5d927ad7d0b8494 2024-12-11T04:28:25,561 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/5d238cffce2f431aa40f7464d53cf5ad as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/5d238cffce2f431aa40f7464d53cf5ad 2024-12-11T04:28:25,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/834e4651361c4567904ae716abb714ff is 50, key is test_row_0/C:col10/1733891304692/Put/seqid=0 2024-12-11T04:28:25,570 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c171d7ccfa412c571490e92799f7df2c/B of c171d7ccfa412c571490e92799f7df2c into 5d238cffce2f431aa40f7464d53cf5ad(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T04:28:25,570 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:25,570 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c., storeName=c171d7ccfa412c571490e92799f7df2c/B, priority=13, startTime=1733891305494; duration=0sec 2024-12-11T04:28:25,570 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:28:25,570 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c171d7ccfa412c571490e92799f7df2c:B 2024-12-11T04:28:25,570 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:28:25,571 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:28:25,571 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): c171d7ccfa412c571490e92799f7df2c/C is initiating minor compaction (all files) 2024-12-11T04:28:25,571 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c171d7ccfa412c571490e92799f7df2c/C in TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:25,572 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/06c0704f1031404f8ab19fd4dc28ac2c, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/09668b35a13a43c39949da2e23b4a2b0, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/30ba8118544e44408d82836a96afa319] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp, totalSize=35.2 K 2024-12-11T04:28:25,572 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 06c0704f1031404f8ab19fd4dc28ac2c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1733891302482 2024-12-11T04:28:25,572 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 09668b35a13a43c39949da2e23b4a2b0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1733891302493 2024-12-11T04:28:25,573 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 30ba8118544e44408d82836a96afa319, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733891304630 2024-12-11T04:28:25,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is 
added to blk_1073742316_1492 (size=12001) 2024-12-11T04:28:25,584 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/834e4651361c4567904ae716abb714ff 2024-12-11T04:28:25,586 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c171d7ccfa412c571490e92799f7df2c#C#compaction#413 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:25,586 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/1c4fb582ab90432bbf49837841680910 is 50, key is test_row_0/C:col10/1733891304636/Put/seqid=0 2024-12-11T04:28:25,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/24beb141f8564751bcba3fb688c2e369 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/24beb141f8564751bcba3fb688c2e369 2024-12-11T04:28:25,592 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/24beb141f8564751bcba3fb688c2e369, entries=150, sequenceid=75, filesize=11.7 K 2024-12-11T04:28:25,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/51ec0d62228d4a1cb5d927ad7d0b8494 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/51ec0d62228d4a1cb5d927ad7d0b8494 2024-12-11T04:28:25,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742317_1493 (size=12104) 2024-12-11T04:28:25,598 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/51ec0d62228d4a1cb5d927ad7d0b8494, entries=150, sequenceid=75, filesize=11.7 K 2024-12-11T04:28:25,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/834e4651361c4567904ae716abb714ff as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/834e4651361c4567904ae716abb714ff 2024-12-11T04:28:25,603 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/834e4651361c4567904ae716abb714ff, entries=150, sequenceid=75, filesize=11.7 K 2024-12-11T04:28:25,605 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=0 B/0 for c171d7ccfa412c571490e92799f7df2c in 111ms, sequenceid=75, compaction requested=false 2024-12-11T04:28:25,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2538): Flush status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:25,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:25,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-12-11T04:28:25,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=141 2024-12-11T04:28:25,607 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=141, resume processing ppid=140 2024-12-11T04:28:25,607 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, ppid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0270 sec 2024-12-11T04:28:25,609 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees in 1.0310 sec 2024-12-11T04:28:25,661 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-11T04:28:25,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-11T04:28:25,681 INFO [Thread-2131 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 140 completed 2024-12-11T04:28:25,683 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:28:25,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees 2024-12-11T04:28:25,684 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:28:25,685 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:28:25,685 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:28:25,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-11T04:28:25,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-11T04:28:25,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on c171d7ccfa412c571490e92799f7df2c 2024-12-11T04:28:25,827 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c171d7ccfa412c571490e92799f7df2c 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-11T04:28:25,828 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=A 2024-12-11T04:28:25,829 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:25,829 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=B 2024-12-11T04:28:25,829 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:25,829 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=C 2024-12-11T04:28:25,829 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:25,833 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/977b52fabd55432cba1c6f7813ec46fb is 50, key is test_row_0/A:col10/1733891305826/Put/seqid=0 2024-12-11T04:28:25,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742318_1494 (size=12001) 2024-12-11T04:28:25,838 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:25,838 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-11T04:28:25,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 
2024-12-11T04:28:25,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:25,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:25,838 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:25,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:28:25,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:25,866 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:25,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891365861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:25,870 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:25,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891365863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:25,870 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:25,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891365864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:25,871 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:25,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891365865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:25,872 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:25,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891365867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:25,958 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/78384c01f5d4424b9048099a0412c324 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/78384c01f5d4424b9048099a0412c324 2024-12-11T04:28:25,962 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c171d7ccfa412c571490e92799f7df2c/A of c171d7ccfa412c571490e92799f7df2c into 78384c01f5d4424b9048099a0412c324(size=11.8 K), total size for store is 23.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T04:28:25,962 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:25,962 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c., storeName=c171d7ccfa412c571490e92799f7df2c/A, priority=13, startTime=1733891305494; duration=0sec 2024-12-11T04:28:25,962 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:25,962 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c171d7ccfa412c571490e92799f7df2c:A 2024-12-11T04:28:25,972 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:25,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891365968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:25,972 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:25,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891365971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:25,973 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:25,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891365971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:25,973 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:25,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891365972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:25,979 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:25,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891365973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:25,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-11T04:28:25,990 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:25,990 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-11T04:28:25,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:25,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:25,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:25,991 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:28:25,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:25,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:26,000 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/1c4fb582ab90432bbf49837841680910 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/1c4fb582ab90432bbf49837841680910 2024-12-11T04:28:26,003 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c171d7ccfa412c571490e92799f7df2c/C of c171d7ccfa412c571490e92799f7df2c into 1c4fb582ab90432bbf49837841680910(size=11.8 K), total size for store is 23.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:28:26,003 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:26,003 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c., storeName=c171d7ccfa412c571490e92799f7df2c/C, priority=13, startTime=1733891305495; duration=0sec 2024-12-11T04:28:26,004 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:26,004 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c171d7ccfa412c571490e92799f7df2c:C 2024-12-11T04:28:26,143 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:26,144 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-11T04:28:26,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:26,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:26,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 
2024-12-11T04:28:26,144 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:26,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:26,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:26,178 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:26,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891366173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:26,178 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:26,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891366174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:26,178 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:26,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891366175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:26,179 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:26,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891366175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:26,185 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:26,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891366180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:26,237 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=88 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/977b52fabd55432cba1c6f7813ec46fb 2024-12-11T04:28:26,243 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/d94b94177ee247498d937a60a34a519b is 50, key is test_row_0/B:col10/1733891305826/Put/seqid=0 2024-12-11T04:28:26,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742319_1495 (size=12001) 2024-12-11T04:28:26,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-11T04:28:26,296 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:26,296 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-11T04:28:26,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 
2024-12-11T04:28:26,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:26,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:26,297 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:26,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:28:26,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:26,448 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:26,449 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-11T04:28:26,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:26,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 
as already flushing 2024-12-11T04:28:26,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:26,449 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:26,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:26,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:26,482 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:26,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891366479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:26,482 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:26,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891366479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:26,483 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:26,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891366480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:26,483 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:26,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891366480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:26,489 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:26,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891366488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:26,601 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:26,602 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-11T04:28:26,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:26,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:26,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:26,602 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:26,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:26,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:26,650 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=88 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/d94b94177ee247498d937a60a34a519b 2024-12-11T04:28:26,656 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/7a035bcfef664a558c9a816729250a9d is 50, key is test_row_0/C:col10/1733891305826/Put/seqid=0 2024-12-11T04:28:26,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742320_1496 (size=12001) 2024-12-11T04:28:26,754 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:26,754 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-11T04:28:26,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:26,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 
as already flushing 2024-12-11T04:28:26,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:26,755 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:26,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:26,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:26,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-11T04:28:26,907 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:26,907 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-11T04:28:26,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:26,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:26,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:26,908 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:26,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:26,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:26,989 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:26,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891366983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:26,989 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:26,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891366986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:26,991 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:26,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891366987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:26,991 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:26,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891366988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:26,997 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:26,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891366991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:27,059 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:27,059 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-11T04:28:27,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:27,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:27,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:27,060 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:27,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:27,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:27,061 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=88 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/7a035bcfef664a558c9a816729250a9d 2024-12-11T04:28:27,066 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/977b52fabd55432cba1c6f7813ec46fb as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/977b52fabd55432cba1c6f7813ec46fb 2024-12-11T04:28:27,069 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/977b52fabd55432cba1c6f7813ec46fb, entries=150, sequenceid=88, filesize=11.7 K 2024-12-11T04:28:27,070 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/d94b94177ee247498d937a60a34a519b as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/d94b94177ee247498d937a60a34a519b 2024-12-11T04:28:27,073 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/d94b94177ee247498d937a60a34a519b, entries=150, sequenceid=88, 
filesize=11.7 K 2024-12-11T04:28:27,074 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/7a035bcfef664a558c9a816729250a9d as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/7a035bcfef664a558c9a816729250a9d 2024-12-11T04:28:27,077 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/7a035bcfef664a558c9a816729250a9d, entries=150, sequenceid=88, filesize=11.7 K 2024-12-11T04:28:27,078 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for c171d7ccfa412c571490e92799f7df2c in 1251ms, sequenceid=88, compaction requested=true 2024-12-11T04:28:27,078 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:27,078 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c171d7ccfa412c571490e92799f7df2c:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:28:27,078 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:27,078 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:28:27,078 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:28:27,078 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c171d7ccfa412c571490e92799f7df2c:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:28:27,079 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:27,079 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c171d7ccfa412c571490e92799f7df2c:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:28:27,080 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:28:27,080 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:28:27,080 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): c171d7ccfa412c571490e92799f7df2c/A is initiating minor compaction (all files) 2024-12-11T04:28:27,080 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c171d7ccfa412c571490e92799f7df2c/A in 
TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:27,080 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:28:27,080 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): c171d7ccfa412c571490e92799f7df2c/B is initiating minor compaction (all files) 2024-12-11T04:28:27,080 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/78384c01f5d4424b9048099a0412c324, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/24beb141f8564751bcba3fb688c2e369, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/977b52fabd55432cba1c6f7813ec46fb] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp, totalSize=35.3 K 2024-12-11T04:28:27,080 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c171d7ccfa412c571490e92799f7df2c/B in TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:27,080 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/5d238cffce2f431aa40f7464d53cf5ad, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/51ec0d62228d4a1cb5d927ad7d0b8494, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/d94b94177ee247498d937a60a34a519b] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp, totalSize=35.3 K 2024-12-11T04:28:27,080 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 78384c01f5d4424b9048099a0412c324, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733891304630 2024-12-11T04:28:27,080 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 5d238cffce2f431aa40f7464d53cf5ad, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733891304630 2024-12-11T04:28:27,082 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 24beb141f8564751bcba3fb688c2e369, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1733891304691 2024-12-11T04:28:27,082 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 51ec0d62228d4a1cb5d927ad7d0b8494, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1733891304691 2024-12-11T04:28:27,082 
DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 977b52fabd55432cba1c6f7813ec46fb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1733891305824 2024-12-11T04:28:27,082 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting d94b94177ee247498d937a60a34a519b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1733891305824 2024-12-11T04:28:27,090 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c171d7ccfa412c571490e92799f7df2c#A#compaction#417 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:27,091 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/ac60cf650fd24cf094d97dcfa9e6829f is 50, key is test_row_0/A:col10/1733891305826/Put/seqid=0 2024-12-11T04:28:27,106 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c171d7ccfa412c571490e92799f7df2c#B#compaction#418 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:27,106 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/331b19d0e37c441aabaab2c560b4b039 is 50, key is test_row_0/B:col10/1733891305826/Put/seqid=0 2024-12-11T04:28:27,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742321_1497 (size=12207) 2024-12-11T04:28:27,122 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/ac60cf650fd24cf094d97dcfa9e6829f as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/ac60cf650fd24cf094d97dcfa9e6829f 2024-12-11T04:28:27,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742322_1498 (size=12207) 2024-12-11T04:28:27,131 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c171d7ccfa412c571490e92799f7df2c/A of c171d7ccfa412c571490e92799f7df2c into ac60cf650fd24cf094d97dcfa9e6829f(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
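The ExploringCompactionPolicy entries above report that all three eligible store files (roughly 11.8 K + 11.7 K + 11.7 K, 36106 bytes in total) were selected because the candidate set is "in ratio" and meets the minimum file count. The self-contained Java sketch below re-implements that ratio test in simplified form; it is only an illustration of the check these log lines refer to, not the actual HBase code, and the individual file sizes and the min/max/ratio values (3, 10 and 1.2, the stock defaults of hbase.hstore.compaction.min, hbase.hstore.compaction.max and hbase.hstore.compaction.ratio) are inferred or assumed rather than read from this test's configuration.

import java.util.List;

// Simplified stand-in for the "files in ratio" test mentioned by the
// compactions.ExploringCompactionPolicy log lines: a candidate selection is
// acceptable when every file is no larger than ratio * (sum of the other
// files' sizes) and the file count lies within [minFiles, maxFiles].
public class CompactionRatioCheck {

  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    if (fileSizes.size() < 2) {
      return true;
    }
    long total = 0;
    for (long size : fileSizes) {
      total += size;
    }
    for (long size : fileSizes) {
      // Compare each file against the combined size of the other candidates.
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  static boolean isSelectable(List<Long> fileSizes, int minFiles, int maxFiles, double ratio) {
    return fileSizes.size() >= minFiles
        && fileSizes.size() <= maxFiles
        && filesInRatio(fileSizes, ratio);
  }

  public static void main(String[] args) {
    // Sizes approximated from the log above: three HFiles summing to 36106 bytes.
    List<Long> sizes = List.of(12104L, 12001L, 12001L);
    System.out.println(isSelectable(sizes, 3, 10, 1.2)); // prints true
  }
}

With these numbers every file is smaller than 1.2 times the sum of the other two, which is consistent with the whole set being compacted into the single ~11.9 K file committed above.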
2024-12-11T04:28:27,131 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:27,131 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/331b19d0e37c441aabaab2c560b4b039 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/331b19d0e37c441aabaab2c560b4b039 2024-12-11T04:28:27,131 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c., storeName=c171d7ccfa412c571490e92799f7df2c/A, priority=13, startTime=1733891307078; duration=0sec 2024-12-11T04:28:27,131 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:28:27,131 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c171d7ccfa412c571490e92799f7df2c:A 2024-12-11T04:28:27,131 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:28:27,135 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:28:27,135 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): c171d7ccfa412c571490e92799f7df2c/C is initiating minor compaction (all files) 2024-12-11T04:28:27,135 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c171d7ccfa412c571490e92799f7df2c/C in TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 
2024-12-11T04:28:27,135 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/1c4fb582ab90432bbf49837841680910, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/834e4651361c4567904ae716abb714ff, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/7a035bcfef664a558c9a816729250a9d] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp, totalSize=35.3 K 2024-12-11T04:28:27,136 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1c4fb582ab90432bbf49837841680910, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733891304630 2024-12-11T04:28:27,136 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c171d7ccfa412c571490e92799f7df2c/B of c171d7ccfa412c571490e92799f7df2c into 331b19d0e37c441aabaab2c560b4b039(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:28:27,136 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:27,136 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c., storeName=c171d7ccfa412c571490e92799f7df2c/B, priority=13, startTime=1733891307078; duration=0sec 2024-12-11T04:28:27,136 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:27,136 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c171d7ccfa412c571490e92799f7df2c:B 2024-12-11T04:28:27,136 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 834e4651361c4567904ae716abb714ff, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1733891304691 2024-12-11T04:28:27,137 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7a035bcfef664a558c9a816729250a9d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1733891305824 2024-12-11T04:28:27,162 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c171d7ccfa412c571490e92799f7df2c#C#compaction#419 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:27,163 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/de9be0a1b3bb4ed5b62b72ff989e262f is 50, key is test_row_0/C:col10/1733891305826/Put/seqid=0 2024-12-11T04:28:27,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742323_1499 (size=12207) 2024-12-11T04:28:27,187 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/de9be0a1b3bb4ed5b62b72ff989e262f as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/de9be0a1b3bb4ed5b62b72ff989e262f 2024-12-11T04:28:27,193 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c171d7ccfa412c571490e92799f7df2c/C of c171d7ccfa412c571490e92799f7df2c into de9be0a1b3bb4ed5b62b72ff989e262f(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:28:27,193 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:27,193 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c., storeName=c171d7ccfa412c571490e92799f7df2c/C, priority=13, startTime=1733891307079; duration=0sec 2024-12-11T04:28:27,193 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:27,193 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c171d7ccfa412c571490e92799f7df2c:C 2024-12-11T04:28:27,212 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:27,213 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-11T04:28:27,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 
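The FlushRegionCallable starting here (pid=143) is the region-server half of a master FlushTableProcedure; the same client-driven flush cycle repeats below as pid=144/145 and pid=146/147. As a minimal client-side sketch of issuing such a flush (assuming an hbase-site.xml on the classpath and the standard Admin API; the class name is hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Illustrative driver: asks the master to flush every region of the test table,
    // which is what produces the FlushTableProcedure / FlushRegionProcedure pairs
    // logged in this section.
    public class FlushTestAcidGuarantees {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                // Submits a flush for the whole table; the client then polls the master
                // ("Checking to see if procedure is done pid=...") until it reports completion.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }

The "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" and "Operation: FLUSH ... procId: ... completed" records further down are the master-side and client-side traces of exactly this kind of request.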
2024-12-11T04:28:27,213 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2837): Flushing c171d7ccfa412c571490e92799f7df2c 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-11T04:28:27,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=A 2024-12-11T04:28:27,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:27,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=B 2024-12-11T04:28:27,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:27,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=C 2024-12-11T04:28:27,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:27,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/73ffc29eac9a4eeeaba2051022d63f96 is 50, key is test_row_0/A:col10/1733891305865/Put/seqid=0 2024-12-11T04:28:27,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742324_1500 (size=12001) 2024-12-11T04:28:27,231 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/73ffc29eac9a4eeeaba2051022d63f96 2024-12-11T04:28:27,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/03e387fd5ed4462a83356706896c69c8 is 50, key is test_row_0/B:col10/1733891305865/Put/seqid=0 2024-12-11T04:28:27,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742325_1501 (size=12001) 2024-12-11T04:28:27,260 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=117 (bloomFilter=true), 
to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/03e387fd5ed4462a83356706896c69c8 2024-12-11T04:28:27,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/0761b08dc4874533b10bbf8d5a1f25a4 is 50, key is test_row_0/C:col10/1733891305865/Put/seqid=0 2024-12-11T04:28:27,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742326_1502 (size=12001) 2024-12-11T04:28:27,286 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/0761b08dc4874533b10bbf8d5a1f25a4 2024-12-11T04:28:27,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/73ffc29eac9a4eeeaba2051022d63f96 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/73ffc29eac9a4eeeaba2051022d63f96 2024-12-11T04:28:27,294 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/73ffc29eac9a4eeeaba2051022d63f96, entries=150, sequenceid=117, filesize=11.7 K 2024-12-11T04:28:27,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/03e387fd5ed4462a83356706896c69c8 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/03e387fd5ed4462a83356706896c69c8 2024-12-11T04:28:27,299 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/03e387fd5ed4462a83356706896c69c8, entries=150, sequenceid=117, filesize=11.7 K 2024-12-11T04:28:27,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/0761b08dc4874533b10bbf8d5a1f25a4 as 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/0761b08dc4874533b10bbf8d5a1f25a4 2024-12-11T04:28:27,303 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/0761b08dc4874533b10bbf8d5a1f25a4, entries=150, sequenceid=117, filesize=11.7 K 2024-12-11T04:28:27,304 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=0 B/0 for c171d7ccfa412c571490e92799f7df2c in 91ms, sequenceid=117, compaction requested=false 2024-12-11T04:28:27,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2538): Flush status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:27,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:27,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=143 2024-12-11T04:28:27,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=143 2024-12-11T04:28:27,306 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=143, resume processing ppid=142 2024-12-11T04:28:27,306 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6200 sec 2024-12-11T04:28:27,307 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees in 1.6230 sec 2024-12-11T04:28:27,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-11T04:28:27,790 INFO [Thread-2131 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 142 completed 2024-12-11T04:28:27,792 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:28:27,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees 2024-12-11T04:28:27,793 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:28:27,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-11T04:28:27,794 INFO [PEWorker-1 {}] 
procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:28:27,794 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:28:27,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-11T04:28:27,945 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:27,945 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-12-11T04:28:27,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:27,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2538): Flush status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:27,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:27,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=145 2024-12-11T04:28:27,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=145 2024-12-11T04:28:27,948 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=145, resume processing ppid=144 2024-12-11T04:28:27,948 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 153 msec 2024-12-11T04:28:27,949 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees in 156 msec 2024-12-11T04:28:28,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on c171d7ccfa412c571490e92799f7df2c 2024-12-11T04:28:28,006 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c171d7ccfa412c571490e92799f7df2c 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-11T04:28:28,006 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=A 2024-12-11T04:28:28,006 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:28,006 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=B 2024-12-11T04:28:28,006 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new 
segment=null 2024-12-11T04:28:28,006 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=C 2024-12-11T04:28:28,006 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:28,010 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/b61fccd5b682428397dae6cddf25a3b5 is 50, key is test_row_0/A:col10/1733891308005/Put/seqid=0 2024-12-11T04:28:28,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742327_1503 (size=14391) 2024-12-11T04:28:28,070 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:28,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891368038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:28,070 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:28,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891368040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:28,078 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:28,078 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:28,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891368070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:28,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891368070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:28,078 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:28,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891368070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:28,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-11T04:28:28,096 INFO [Thread-2131 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 144 completed 2024-12-11T04:28:28,097 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:28:28,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=146, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees 2024-12-11T04:28:28,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-11T04:28:28,098 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=146, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:28:28,099 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=146, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:28:28,099 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=147, ppid=146, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:28:28,174 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:28,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891368171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:28,174 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:28,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891368171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:28,182 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:28,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891368179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:28,183 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:28,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891368179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:28,183 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:28,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891368180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:28,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-11T04:28:28,250 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:28,250 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-12-11T04:28:28,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:28,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:28,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:28,251 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:28,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:28,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:28,382 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:28,382 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:28,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891368376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:28,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891368375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:28,390 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:28,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891368384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:28,390 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:28,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891368384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:28,390 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:28,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891368384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:28,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-11T04:28:28,402 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:28,402 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-12-11T04:28:28,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:28,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:28,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:28,402 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:28,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:28,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:28,417 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/b61fccd5b682428397dae6cddf25a3b5 2024-12-11T04:28:28,423 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/5eeb77fdcf1546f4a22d0f6494c7c9e0 is 50, key is test_row_0/B:col10/1733891308005/Put/seqid=0 2024-12-11T04:28:28,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742328_1504 (size=12051) 2024-12-11T04:28:28,426 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/5eeb77fdcf1546f4a22d0f6494c7c9e0 2024-12-11T04:28:28,432 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/bbd96fe3a33f4be9a01c628683bb2f89 is 50, key is test_row_0/C:col10/1733891308005/Put/seqid=0 2024-12-11T04:28:28,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742329_1505 (size=12051) 2024-12-11T04:28:28,554 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:28,555 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-12-11T04:28:28,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:28,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 
as already flushing 2024-12-11T04:28:28,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:28,555 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:28,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:28,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:28,687 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:28,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891368683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:28,688 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:28,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891368685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:28,695 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:28,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891368691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:28,695 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:28,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891368691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:28,696 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:28,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891368693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:28,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-11T04:28:28,707 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:28,707 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-12-11T04:28:28,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:28,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:28,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:28,707 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:28,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:28,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:28,836 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/bbd96fe3a33f4be9a01c628683bb2f89 2024-12-11T04:28:28,839 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/b61fccd5b682428397dae6cddf25a3b5 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/b61fccd5b682428397dae6cddf25a3b5 2024-12-11T04:28:28,843 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/b61fccd5b682428397dae6cddf25a3b5, entries=200, sequenceid=129, filesize=14.1 K 2024-12-11T04:28:28,844 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/5eeb77fdcf1546f4a22d0f6494c7c9e0 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/5eeb77fdcf1546f4a22d0f6494c7c9e0 2024-12-11T04:28:28,847 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/5eeb77fdcf1546f4a22d0f6494c7c9e0, entries=150, 
sequenceid=129, filesize=11.8 K
2024-12-11T04:28:28,848 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/bbd96fe3a33f4be9a01c628683bb2f89 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/bbd96fe3a33f4be9a01c628683bb2f89
2024-12-11T04:28:28,854 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/bbd96fe3a33f4be9a01c628683bb2f89, entries=150, sequenceid=129, filesize=11.8 K
2024-12-11T04:28:28,855 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=161.02 KB/164880 for c171d7ccfa412c571490e92799f7df2c in 849ms, sequenceid=129, compaction requested=true
2024-12-11T04:28:28,855 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c171d7ccfa412c571490e92799f7df2c:
2024-12-11T04:28:28,855 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c171d7ccfa412c571490e92799f7df2c:A, priority=-2147483648, current under compaction store size is 1
2024-12-11T04:28:28,855 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-11T04:28:28,855 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c171d7ccfa412c571490e92799f7df2c:B, priority=-2147483648, current under compaction store size is 2
2024-12-11T04:28:28,855 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-11T04:28:28,855 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c171d7ccfa412c571490e92799f7df2c:C, priority=-2147483648, current under compaction store size is 3
2024-12-11T04:28:28,855 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-11T04:28:28,855 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-11T04:28:28,855 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-11T04:28:28,856 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38599 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-11T04:28:28,856 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): c171d7ccfa412c571490e92799f7df2c/A is initiating minor compaction (all files)
2024-12-11T04:28:28,856 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c171d7ccfa412c571490e92799f7df2c/A in TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.
2024-12-11T04:28:28,856 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/ac60cf650fd24cf094d97dcfa9e6829f, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/73ffc29eac9a4eeeaba2051022d63f96, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/b61fccd5b682428397dae6cddf25a3b5] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp, totalSize=37.7 K
2024-12-11T04:28:28,856 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-11T04:28:28,857 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): c171d7ccfa412c571490e92799f7df2c/B is initiating minor compaction (all files)
2024-12-11T04:28:28,857 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c171d7ccfa412c571490e92799f7df2c/B in TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.
2024-12-11T04:28:28,857 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/331b19d0e37c441aabaab2c560b4b039, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/03e387fd5ed4462a83356706896c69c8, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/5eeb77fdcf1546f4a22d0f6494c7c9e0] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp, totalSize=35.4 K
2024-12-11T04:28:28,857 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 331b19d0e37c441aabaab2c560b4b039, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1733891305824
2024-12-11T04:28:28,857 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting ac60cf650fd24cf094d97dcfa9e6829f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1733891305824
2024-12-11T04:28:28,857 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 73ffc29eac9a4eeeaba2051022d63f96, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733891305860
2024-12-11T04:28:28,857 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 03e387fd5ed4462a83356706896c69c8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733891305860
2024-12-11T04:28:28,858 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 5eeb77fdcf1546f4a22d0f6494c7c9e0, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733891308000
2024-12-11T04:28:28,858 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting b61fccd5b682428397dae6cddf25a3b5, keycount=200, bloomtype=ROW, size=14.1 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733891308000
2024-12-11T04:28:28,859 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267
2024-12-11T04:28:28,860 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147
2024-12-11T04:28:28,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.
2024-12-11T04:28:28,860 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2837): Flushing c171d7ccfa412c571490e92799f7df2c 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB
2024-12-11T04:28:28,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=A
2024-12-11T04:28:28,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-11T04:28:28,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=B
2024-12-11T04:28:28,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-11T04:28:28,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=C
2024-12-11T04:28:28,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-11T04:28:28,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/a009e5e3b8b542db854fb57ff1102d9f is 50, key is test_row_0/A:col10/1733891308041/Put/seqid=0
2024-12-11T04:28:28,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742330_1506 (size=12151)
2024-12-11T04:28:28,867 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c171d7ccfa412c571490e92799f7df2c#A#compaction#427 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms.
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:28,868 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c171d7ccfa412c571490e92799f7df2c#B#compaction#428 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:28,869 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/abe5c04fbc5a4302a356c44ada194790 is 50, key is test_row_0/A:col10/1733891308005/Put/seqid=0 2024-12-11T04:28:28,869 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/5c7ffe367a3c4048af5cbd038f3f64f0 is 50, key is test_row_0/B:col10/1733891308005/Put/seqid=0 2024-12-11T04:28:28,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742331_1507 (size=12359) 2024-12-11T04:28:28,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742332_1508 (size=12359) 2024-12-11T04:28:29,194 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:29,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on c171d7ccfa412c571490e92799f7df2c 2024-12-11T04:28:29,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-11T04:28:29,203 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:29,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891369199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:29,204 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:29,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891369200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:29,206 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:29,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891369201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:29,210 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:29,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891369202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:29,210 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:29,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891369203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:29,268 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/a009e5e3b8b542db854fb57ff1102d9f 2024-12-11T04:28:29,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/1011caed1e34447da1ce8941ecd387ba is 50, key is test_row_0/B:col10/1733891308041/Put/seqid=0 2024-12-11T04:28:29,284 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/abe5c04fbc5a4302a356c44ada194790 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/abe5c04fbc5a4302a356c44ada194790 2024-12-11T04:28:29,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742333_1509 (size=12151) 2024-12-11T04:28:29,285 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=156 (bloomFilter=true), 
to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/1011caed1e34447da1ce8941ecd387ba
2024-12-11T04:28:29,286 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/5c7ffe367a3c4048af5cbd038f3f64f0 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/5c7ffe367a3c4048af5cbd038f3f64f0
2024-12-11T04:28:29,290 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c171d7ccfa412c571490e92799f7df2c/A of c171d7ccfa412c571490e92799f7df2c into abe5c04fbc5a4302a356c44ada194790(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-11T04:28:29,290 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c171d7ccfa412c571490e92799f7df2c:
2024-12-11T04:28:29,290 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c., storeName=c171d7ccfa412c571490e92799f7df2c/A, priority=13, startTime=1733891308855; duration=0sec
2024-12-11T04:28:29,290 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-11T04:28:29,290 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c171d7ccfa412c571490e92799f7df2c:A
2024-12-11T04:28:29,290 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-11T04:28:29,291 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c171d7ccfa412c571490e92799f7df2c/B of c171d7ccfa412c571490e92799f7df2c into 5c7ffe367a3c4048af5cbd038f3f64f0(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-11T04:28:29,291 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:29,291 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c., storeName=c171d7ccfa412c571490e92799f7df2c/B, priority=13, startTime=1733891308855; duration=0sec 2024-12-11T04:28:29,291 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:29,291 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c171d7ccfa412c571490e92799f7df2c:B 2024-12-11T04:28:29,292 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:28:29,292 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): c171d7ccfa412c571490e92799f7df2c/C is initiating minor compaction (all files) 2024-12-11T04:28:29,292 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c171d7ccfa412c571490e92799f7df2c/C in TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:29,292 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/de9be0a1b3bb4ed5b62b72ff989e262f, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/0761b08dc4874533b10bbf8d5a1f25a4, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/bbd96fe3a33f4be9a01c628683bb2f89] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp, totalSize=35.4 K 2024-12-11T04:28:29,292 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting de9be0a1b3bb4ed5b62b72ff989e262f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1733891305824 2024-12-11T04:28:29,293 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0761b08dc4874533b10bbf8d5a1f25a4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1733891305860 2024-12-11T04:28:29,293 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting bbd96fe3a33f4be9a01c628683bb2f89, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733891308000 2024-12-11T04:28:29,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/ab843211b9444132bac014fa9e8892f5 is 50, key is test_row_0/C:col10/1733891308041/Put/seqid=0 2024-12-11T04:28:29,300 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c171d7ccfa412c571490e92799f7df2c#C#compaction#431 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:29,301 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/aeafff1dbbc9484f8f9b76c07ecc429f is 50, key is test_row_0/C:col10/1733891308005/Put/seqid=0 2024-12-11T04:28:29,306 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:29,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891369305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:29,312 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:29,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891369307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:29,312 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:29,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891369311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:29,314 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:29,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891369311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:29,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742334_1510 (size=12359) 2024-12-11T04:28:29,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742335_1511 (size=12151) 2024-12-11T04:28:29,330 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/ab843211b9444132bac014fa9e8892f5 2024-12-11T04:28:29,331 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/aeafff1dbbc9484f8f9b76c07ecc429f as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/aeafff1dbbc9484f8f9b76c07ecc429f 2024-12-11T04:28:29,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/a009e5e3b8b542db854fb57ff1102d9f as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/a009e5e3b8b542db854fb57ff1102d9f 
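[annotation] The repeated RegionTooBusyException warnings and CallRunner entries above show the region server refusing Mutate calls while the region's memstore sits above its blocking limit (512.0 K in this run, presumably the test's small flush size times the block multiplier). As a hedged illustration only, and not the test's actual client code, a caller could back off and retry such writes roughly as sketched below; the stock HBase client already retries RegionTooBusyException internally, so this just makes the behaviour explicit. Table, row, family and value names are copied from the log purely as placeholders.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetryExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;
          for (int attempt = 1; attempt <= 5; attempt++) {
            try {
              table.put(put); // may fail while the region's memstore is over its blocking limit
              break;
            } catch (RegionTooBusyException e) {
              // Region is busy flushing/compacting; wait with exponential backoff and retry.
              Thread.sleep(backoffMs);
              backoffMs *= 2;
            }
          }
        }
      }
    }

[annotation] In practice the more useful fix is server-side: let the flushes visible in this log catch up, or, if the blocking limit is genuinely too tight for the workload, raise hbase.hregion.memstore.flush.size / hbase.hregion.memstore.block.multiplier.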
2024-12-11T04:28:29,337 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c171d7ccfa412c571490e92799f7df2c/C of c171d7ccfa412c571490e92799f7df2c into aeafff1dbbc9484f8f9b76c07ecc429f(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:28:29,337 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:29,338 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c., storeName=c171d7ccfa412c571490e92799f7df2c/C, priority=13, startTime=1733891308855; duration=0sec 2024-12-11T04:28:29,338 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:29,338 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c171d7ccfa412c571490e92799f7df2c:C 2024-12-11T04:28:29,339 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/a009e5e3b8b542db854fb57ff1102d9f, entries=150, sequenceid=156, filesize=11.9 K 2024-12-11T04:28:29,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/1011caed1e34447da1ce8941ecd387ba as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/1011caed1e34447da1ce8941ecd387ba 2024-12-11T04:28:29,343 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/1011caed1e34447da1ce8941ecd387ba, entries=150, sequenceid=156, filesize=11.9 K 2024-12-11T04:28:29,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/ab843211b9444132bac014fa9e8892f5 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/ab843211b9444132bac014fa9e8892f5 2024-12-11T04:28:29,347 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/ab843211b9444132bac014fa9e8892f5, entries=150, sequenceid=156, filesize=11.9 K 
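[annotation] The store files committed above (sequenceid=156 for A, B and C) belong to the flush that pid=147 reports as finished just below; that FlushRegionProcedure is what an explicit table-flush request turns into on the region server. A minimal sketch of issuing such a request through the public Admin API, assuming nothing beyond the table name seen in this log, looks like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Ask the master to flush every region of the table; on the region server this
          // shows up as the RS_FLUSH_REGIONS / FlushRegionProcedure activity seen in the log.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }

[annotation] Whether the call waits for the flush to fully complete depends on the HBase version, so treat the exact semantics as something to confirm against the matching Javadoc.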
2024-12-11T04:28:29,348 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for c171d7ccfa412c571490e92799f7df2c in 488ms, sequenceid=156, compaction requested=false 2024-12-11T04:28:29,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2538): Flush status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:29,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:29,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=147 2024-12-11T04:28:29,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=147 2024-12-11T04:28:29,351 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=147, resume processing ppid=146 2024-12-11T04:28:29,351 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, ppid=146, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2510 sec 2024-12-11T04:28:29,352 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees in 1.2540 sec 2024-12-11T04:28:29,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on c171d7ccfa412c571490e92799f7df2c 2024-12-11T04:28:29,515 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c171d7ccfa412c571490e92799f7df2c 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-11T04:28:29,516 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=A 2024-12-11T04:28:29,517 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:29,517 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=B 2024-12-11T04:28:29,517 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:29,517 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=C 2024-12-11T04:28:29,517 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:29,521 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/304cd594bb334e189a6da7025c4f596d is 50, key is test_row_0/A:col10/1733891309515/Put/seqid=0 2024-12-11T04:28:29,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742336_1512 (size=12147) 2024-12-11T04:28:29,538 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/304cd594bb334e189a6da7025c4f596d 2024-12-11T04:28:29,546 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/0c4c7286d4c64fd8aef58bce32b0041a is 50, key is test_row_0/B:col10/1733891309515/Put/seqid=0 2024-12-11T04:28:29,552 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:29,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891369546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:29,552 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:29,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891369546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:29,553 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:29,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891369547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:29,553 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:29,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891369547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:29,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742337_1513 (size=9757) 2024-12-11T04:28:29,561 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/0c4c7286d4c64fd8aef58bce32b0041a 2024-12-11T04:28:29,568 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/9bc61d16b36e441b858e08ac2c7d2493 is 50, key is test_row_0/C:col10/1733891309515/Put/seqid=0 2024-12-11T04:28:29,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742338_1514 (size=9757) 2024-12-11T04:28:29,655 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:29,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891369653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:29,656 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:29,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891369653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:29,656 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:29,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891369654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:29,660 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:29,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891369655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:29,857 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:29,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891369857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:29,862 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:29,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891369858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:29,864 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:29,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891369858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:29,864 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:29,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891369861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:29,973 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/9bc61d16b36e441b858e08ac2c7d2493 2024-12-11T04:28:29,977 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/304cd594bb334e189a6da7025c4f596d as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/304cd594bb334e189a6da7025c4f596d 2024-12-11T04:28:29,980 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/304cd594bb334e189a6da7025c4f596d, entries=150, sequenceid=171, filesize=11.9 K 2024-12-11T04:28:29,981 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/0c4c7286d4c64fd8aef58bce32b0041a as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/0c4c7286d4c64fd8aef58bce32b0041a 2024-12-11T04:28:29,983 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/0c4c7286d4c64fd8aef58bce32b0041a, entries=100, sequenceid=171, filesize=9.5 K 2024-12-11T04:28:29,984 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/9bc61d16b36e441b858e08ac2c7d2493 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/9bc61d16b36e441b858e08ac2c7d2493 2024-12-11T04:28:29,987 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/9bc61d16b36e441b858e08ac2c7d2493, entries=100, sequenceid=171, filesize=9.5 K 2024-12-11T04:28:29,988 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for c171d7ccfa412c571490e92799f7df2c in 473ms, sequenceid=171, compaction requested=true 2024-12-11T04:28:29,988 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:29,988 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:28:29,989 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c171d7ccfa412c571490e92799f7df2c:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:28:29,989 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:29,989 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:28:29,989 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c171d7ccfa412c571490e92799f7df2c:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:28:29,989 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:29,989 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c171d7ccfa412c571490e92799f7df2c:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:28:29,989 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:28:29,989 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36657 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:28:29,989 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): c171d7ccfa412c571490e92799f7df2c/A is initiating minor compaction (all files) 2024-12-11T04:28:29,989 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c171d7ccfa412c571490e92799f7df2c/A in TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 
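[annotation] The SortedCompactionPolicy / ExploringCompactionPolicy lines above ("3 eligible, 16 blocking", "selected 3 files ... 1 permutations with 1 in ratio") reflect the standard store-file selection knobs. The snippet below is only a sketch of the configuration keys involved, with the stock defaults used as illustrative values; the values actually in effect for this run are whatever the test's mini-cluster set.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minimum/maximum number of store files considered for one minor compaction.
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Ratio used by ExploringCompactionPolicy when judging candidate permutations.
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        // Writes block once a store accumulates this many files ("16 blocking" in the log).
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
      }
    }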
2024-12-11T04:28:29,990 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/abe5c04fbc5a4302a356c44ada194790, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/a009e5e3b8b542db854fb57ff1102d9f, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/304cd594bb334e189a6da7025c4f596d] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp, totalSize=35.8 K 2024-12-11T04:28:29,990 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34267 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:28:29,990 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting abe5c04fbc5a4302a356c44ada194790, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733891308000 2024-12-11T04:28:29,990 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): c171d7ccfa412c571490e92799f7df2c/B is initiating minor compaction (all files) 2024-12-11T04:28:29,990 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c171d7ccfa412c571490e92799f7df2c/B in TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 
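[annotation] Each Compactor(224) line records per-file metadata (keycount, bloomtype=ROW, encoding=NONE, seqNum, earliestPutTs). The bloom filter type and block encoding come from the column family schema; a hedged sketch of declaring them with the public descriptor builders follows, using family "A" of this table purely as an example rather than the test's real table setup.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FamilySchemaSketch {
      public static void main(String[] args) {
        // ROW bloom filters and no block encoding, matching the "bloomtype=ROW, encoding=NONE"
        // attributes reported for the compacted files in the log.
        ColumnFamilyDescriptor familyA = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("A"))
            .setBloomFilterType(BloomType.ROW)
            .setDataBlockEncoding(DataBlockEncoding.NONE)
            .build();
        TableDescriptor table = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setColumnFamily(familyA)
            .build();
        System.out.println(table);
      }
    }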
2024-12-11T04:28:29,990 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/5c7ffe367a3c4048af5cbd038f3f64f0, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/1011caed1e34447da1ce8941ecd387ba, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/0c4c7286d4c64fd8aef58bce32b0041a] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp, totalSize=33.5 K 2024-12-11T04:28:29,990 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting a009e5e3b8b542db854fb57ff1102d9f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1733891308039 2024-12-11T04:28:29,991 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 5c7ffe367a3c4048af5cbd038f3f64f0, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733891308000 2024-12-11T04:28:29,991 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 304cd594bb334e189a6da7025c4f596d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1733891309202 2024-12-11T04:28:29,991 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 1011caed1e34447da1ce8941ecd387ba, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1733891308039 2024-12-11T04:28:29,991 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 0c4c7286d4c64fd8aef58bce32b0041a, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1733891309202 2024-12-11T04:28:29,998 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c171d7ccfa412c571490e92799f7df2c#A#compaction#435 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:29,998 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c171d7ccfa412c571490e92799f7df2c#B#compaction#436 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:29,999 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/e2a9b215d467461abae154b38f946cb7 is 50, key is test_row_0/A:col10/1733891309515/Put/seqid=0 2024-12-11T04:28:29,999 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/b464acbbd0e84a0f9cdbc924be8dd7f9 is 50, key is test_row_0/B:col10/1733891309515/Put/seqid=0 2024-12-11T04:28:30,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742339_1515 (size=12561) 2024-12-11T04:28:30,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742340_1516 (size=12561) 2024-12-11T04:28:30,020 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/e2a9b215d467461abae154b38f946cb7 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/e2a9b215d467461abae154b38f946cb7 2024-12-11T04:28:30,028 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/b464acbbd0e84a0f9cdbc924be8dd7f9 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/b464acbbd0e84a0f9cdbc924be8dd7f9 2024-12-11T04:28:30,030 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c171d7ccfa412c571490e92799f7df2c/A of c171d7ccfa412c571490e92799f7df2c into e2a9b215d467461abae154b38f946cb7(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
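[annotation] The PressureAwareThroughputController lines above report compaction throughput against a 50.00 MB/second limit. That limit comes from the pressure-aware compaction throughput controller's bounds; the sketch below shows the configuration keys believed to control it, with the stock defaults as illustrative values. Treat the exact key names and defaults as assumptions to verify against the 2.7.0-SNAPSHOT code in use here.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Lower/upper bounds (bytes per second) between which the pressure-aware
        // controller scales the allowed compaction throughput.
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        System.out.println("lower bound = "
            + conf.getLong("hbase.hstore.compaction.throughput.lower.bound", -1) + " bytes/sec");
      }
    }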
2024-12-11T04:28:30,030 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:30,030 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c., storeName=c171d7ccfa412c571490e92799f7df2c/A, priority=13, startTime=1733891309988; duration=0sec 2024-12-11T04:28:30,030 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:28:30,030 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c171d7ccfa412c571490e92799f7df2c:A 2024-12-11T04:28:30,031 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:28:30,033 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34267 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:28:30,033 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): c171d7ccfa412c571490e92799f7df2c/C is initiating minor compaction (all files) 2024-12-11T04:28:30,033 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c171d7ccfa412c571490e92799f7df2c/C in TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:30,033 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/aeafff1dbbc9484f8f9b76c07ecc429f, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/ab843211b9444132bac014fa9e8892f5, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/9bc61d16b36e441b858e08ac2c7d2493] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp, totalSize=33.5 K 2024-12-11T04:28:30,034 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting aeafff1dbbc9484f8f9b76c07ecc429f, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733891308000 2024-12-11T04:28:30,035 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c171d7ccfa412c571490e92799f7df2c/B of c171d7ccfa412c571490e92799f7df2c into b464acbbd0e84a0f9cdbc924be8dd7f9(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
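[annotation] While these flushes and compactions churn the A/B/C stores, the TestAcidGuarantees readers keep fetching whole rows and checking that all families agree. A minimal, hypothetical reader in the same spirit (not the test's actual code) could look like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RowConsistencyCheck {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Get get = new Get(Bytes.toBytes("test_row_0")); // row key seen throughout the log
          Result row = table.get(get);                    // a single-row Get is served atomically
          byte[] a = row.getValue(Bytes.toBytes("A"), Bytes.toBytes("col10"));
          byte[] b = row.getValue(Bytes.toBytes("B"), Bytes.toBytes("col10"));
          byte[] c = row.getValue(Bytes.toBytes("C"), Bytes.toBytes("col10"));
          if (!Bytes.equals(a, b) || !Bytes.equals(b, c)) {
            throw new IllegalStateException("families A/B/C disagree for test_row_0");
          }
        }
      }
    }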
2024-12-11T04:28:30,035 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:30,035 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c., storeName=c171d7ccfa412c571490e92799f7df2c/B, priority=13, startTime=1733891309989; duration=0sec 2024-12-11T04:28:30,035 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:30,035 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c171d7ccfa412c571490e92799f7df2c:B 2024-12-11T04:28:30,035 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting ab843211b9444132bac014fa9e8892f5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1733891308039 2024-12-11T04:28:30,036 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9bc61d16b36e441b858e08ac2c7d2493, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1733891309202 2024-12-11T04:28:30,045 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c171d7ccfa412c571490e92799f7df2c#C#compaction#437 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:30,046 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/350f4459821a4110960c859cfc1af35e is 50, key is test_row_0/C:col10/1733891309515/Put/seqid=0 2024-12-11T04:28:30,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742341_1517 (size=12561) 2024-12-11T04:28:30,079 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/350f4459821a4110960c859cfc1af35e as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/350f4459821a4110960c859cfc1af35e 2024-12-11T04:28:30,089 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c171d7ccfa412c571490e92799f7df2c/C of c171d7ccfa412c571490e92799f7df2c into 350f4459821a4110960c859cfc1af35e(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
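[annotation] At this point all three stores (A, B and C) have been rewritten into single ~12.3 K files by the MemStoreFlusher-triggered compaction requests. When a compaction needs to be driven or observed from outside instead, the public Admin API offers calls along the lines of the sketch below; the method names are standard Admin calls, but the polling loop and sleep interval are only illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CompactAndWaitSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName tn = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.compact(tn); // request a minor compaction of every region of the table
          // Poll until the region servers report no compaction in progress for the table.
          while (admin.getCompactionState(tn) != CompactionState.NONE) {
            Thread.sleep(500);
          }
        }
      }
    }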
2024-12-11T04:28:30,089 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:30,089 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c., storeName=c171d7ccfa412c571490e92799f7df2c/C, priority=13, startTime=1733891309989; duration=0sec 2024-12-11T04:28:30,089 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:30,089 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c171d7ccfa412c571490e92799f7df2c:C 2024-12-11T04:28:30,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on c171d7ccfa412c571490e92799f7df2c 2024-12-11T04:28:30,164 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c171d7ccfa412c571490e92799f7df2c 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-11T04:28:30,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=A 2024-12-11T04:28:30,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:30,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=B 2024-12-11T04:28:30,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:30,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=C 2024-12-11T04:28:30,165 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:30,169 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/8beec5f9d12a4746b59447f72bd7a3af is 50, key is test_row_0/A:col10/1733891309545/Put/seqid=0 2024-12-11T04:28:30,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742342_1518 (size=14541) 2024-12-11T04:28:30,175 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:30,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891370170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:30,176 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:30,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891370171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:30,176 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:30,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891370172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:30,179 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:30,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891370175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:30,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-11T04:28:30,202 INFO [Thread-2131 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 146 completed 2024-12-11T04:28:30,203 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:28:30,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=148, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees 2024-12-11T04:28:30,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-11T04:28:30,204 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=148, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:28:30,205 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=148, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:28:30,205 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:28:30,205 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:30,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891370204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:30,280 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:30,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891370276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:30,281 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:30,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891370277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:30,282 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:30,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891370277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:30,282 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:30,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891370280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:30,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-11T04:28:30,357 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:30,358 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-12-11T04:28:30,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:30,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:30,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:30,358 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:30,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:30,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:30,484 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:30,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891370481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:30,485 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:30,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891370482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:30,485 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:30,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891370483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:30,486 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:30,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891370484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:30,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-11T04:28:30,510 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:30,510 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-12-11T04:28:30,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:30,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:30,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:30,511 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
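[Annotation] The pid=148 FlushTableProcedure and its pid=149 FlushRegionProcedure subprocedure in the records above were triggered by a client flush request against TestAcidGuarantees. On the region server, FlushRegionCallable reports "NOT flushing ... as already flushing" because a memstore flush is already in flight, so the remote procedure fails with "Unable to complete flush" and the master keeps re-dispatching pid=149 until it can run. A minimal sketch of the client side of such a request, using the public Admin API, is below; the connection configuration is assumed, and only the table name is taken from the log.

// Sketch of the client side of the FLUSH procedure seen above (pid=148/149).
// Admin.flush asks the master to drive FlushTableProcedure for the table.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // Requests a flush of every region of the table; the master retries the
            // per-region subprocedure while a region is still mid-flush.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}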
2024-12-11T04:28:30,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:30,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:30,574 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=200 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/8beec5f9d12a4746b59447f72bd7a3af 2024-12-11T04:28:30,581 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/e989927c02324740b92c7d4b282de356 is 50, key is test_row_0/B:col10/1733891309545/Put/seqid=0 2024-12-11T04:28:30,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742343_1519 (size=12151) 2024-12-11T04:28:30,590 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=200 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/e989927c02324740b92c7d4b282de356 2024-12-11T04:28:30,598 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/db8bf3da5c8c476da6971ff255de19f8 is 50, key is test_row_0/C:col10/1733891309545/Put/seqid=0 2024-12-11T04:28:30,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742344_1520 (size=12151) 2024-12-11T04:28:30,663 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:30,663 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-12-11T04:28:30,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:30,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:30,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 
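[Annotation] The flush records above show stores A and B each writing roughly 51.44 KB at sequenceid=200, and store C follows the same pattern, which accounts for the 154.31 KB reported when the flush started (3 x 51.44 KB). Once these .tmp files are committed, the region's memstore drops back below the 512 K blocking limit and the RegionTooBusyException responses stop. The sketch below checks that accounting; the flush-size and block-multiplier values used to derive 512 K are assumptions for illustration only, since the test's configuration is not present in this log.

// Hedged sketch of the flush accounting in the surrounding records.
// The 51.44 KB per store and 154.31 KB total come from the log; the flush
// size and multiplier below are assumed values, shown only to illustrate how
// a 512 K blocking limit is typically derived (flush size * block multiplier).
public class MemStoreAccountingSketch {
    public static void main(String[] args) {
        double perStoreKb = 51.44;   // flushed data size per store (A, B, C)
        System.out.printf("flushed total: %.2f KB%n", 3 * perStoreKb);   // ~154.3 KB, matching the flush request

        long assumedFlushSize = 128L * 1024;   // hypothetical hbase.hregion.memstore.flush.size
        long assumedMultiplier = 4;            // hypothetical hbase.hregion.memstore.block.multiplier
        System.out.println("blocking limit: " + (assumedFlushSize * assumedMultiplier / 1024) + " K");
    }
}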
2024-12-11T04:28:30,664 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:30,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:30,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:30,788 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:30,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891370786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:30,789 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:30,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891370786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:30,790 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:30,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891370788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:30,793 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:30,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891370789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:30,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-11T04:28:30,816 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:30,816 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-12-11T04:28:30,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:30,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:30,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:30,816 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
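[Annotation] The repeated RegionTooBusyException warnings and CallRunner records above are the server rejecting Mutate calls while the region's memstore is over its 512 K blocking limit. RegionTooBusyException is a retriable IOException, and the standard HBase client normally retries it internally with backoff, which is why the same connections reappear with new callIds. The sketch below shows what an explicit retry loop could look like if the exception surfaced directly to the caller; that surfacing is an assumption, and the table, row, and column names are taken from the log.

// Hedged sketch: retrying a put that is rejected while the region is over its
// memstore blocking limit. Assumes RegionTooBusyException reaches the caller
// rather than being retried inside the client.
import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            for (int attempt = 0; attempt < 5; attempt++) {
                try {
                    table.put(put);
                    break;                          // write accepted
                } catch (RegionTooBusyException e) {
                    Thread.sleep(100L << attempt);  // back off while the in-flight flush drains the memstore
                }
            }
        }
    }
}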
2024-12-11T04:28:30,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:30,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:30,968 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:30,969 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-12-11T04:28:30,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:30,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:30,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:30,969 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:30,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:30,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:28:31,002 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=200 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/db8bf3da5c8c476da6971ff255de19f8 2024-12-11T04:28:31,005 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/8beec5f9d12a4746b59447f72bd7a3af as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/8beec5f9d12a4746b59447f72bd7a3af 2024-12-11T04:28:31,009 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/8beec5f9d12a4746b59447f72bd7a3af, entries=200, sequenceid=200, filesize=14.2 K 2024-12-11T04:28:31,009 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/e989927c02324740b92c7d4b282de356 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/e989927c02324740b92c7d4b282de356 2024-12-11T04:28:31,013 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/e989927c02324740b92c7d4b282de356, entries=150, sequenceid=200, filesize=11.9 K 2024-12-11T04:28:31,013 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/db8bf3da5c8c476da6971ff255de19f8 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/db8bf3da5c8c476da6971ff255de19f8 2024-12-11T04:28:31,016 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/db8bf3da5c8c476da6971ff255de19f8, entries=150, sequenceid=200, filesize=11.9 K 2024-12-11T04:28:31,017 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for c171d7ccfa412c571490e92799f7df2c in 853ms, sequenceid=200, compaction requested=false 2024-12-11T04:28:31,017 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:31,123 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:31,124 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=149 2024-12-11T04:28:31,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:31,124 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2837): Flushing c171d7ccfa412c571490e92799f7df2c 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-11T04:28:31,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=A 2024-12-11T04:28:31,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:31,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=B 2024-12-11T04:28:31,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:31,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=C 2024-12-11T04:28:31,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:31,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/c23a838bbf634a9f85e4083528b1ce8e is 50, key is test_row_0/A:col10/1733891310171/Put/seqid=0 2024-12-11T04:28:31,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742345_1521 (size=12151) 2024-12-11T04:28:31,132 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/c23a838bbf634a9f85e4083528b1ce8e 2024-12-11T04:28:31,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/c71932644de04a919e2854dfc52043af is 50, key is test_row_0/B:col10/1733891310171/Put/seqid=0 2024-12-11T04:28:31,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742346_1522 (size=12151) 2024-12-11T04:28:31,294 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing 
TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:31,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on c171d7ccfa412c571490e92799f7df2c 2024-12-11T04:28:31,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-11T04:28:31,333 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:31,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891371326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:31,333 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:31,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891371327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:31,333 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:31,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891371328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:31,341 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:31,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891371333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:31,436 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:31,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891371434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:31,436 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:31,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891371434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:31,436 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:31,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891371434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:31,445 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:31,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891371442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:31,543 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/c71932644de04a919e2854dfc52043af 2024-12-11T04:28:31,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/0525998891e14a9eaa8ceeef0254bd6f is 50, key is test_row_0/C:col10/1733891310171/Put/seqid=0 2024-12-11T04:28:31,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742347_1523 (size=12151) 2024-12-11T04:28:31,571 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/0525998891e14a9eaa8ceeef0254bd6f 2024-12-11T04:28:31,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/c23a838bbf634a9f85e4083528b1ce8e as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/c23a838bbf634a9f85e4083528b1ce8e 2024-12-11T04:28:31,579 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/c23a838bbf634a9f85e4083528b1ce8e, entries=150, sequenceid=210, filesize=11.9 K 2024-12-11T04:28:31,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/c71932644de04a919e2854dfc52043af as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/c71932644de04a919e2854dfc52043af 2024-12-11T04:28:31,583 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/c71932644de04a919e2854dfc52043af, entries=150, sequenceid=210, filesize=11.9 K 2024-12-11T04:28:31,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/0525998891e14a9eaa8ceeef0254bd6f as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/0525998891e14a9eaa8ceeef0254bd6f 2024-12-11T04:28:31,588 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/0525998891e14a9eaa8ceeef0254bd6f, entries=150, sequenceid=210, filesize=11.9 K 2024-12-11T04:28:31,588 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for c171d7ccfa412c571490e92799f7df2c in 464ms, sequenceid=210, compaction requested=true 2024-12-11T04:28:31,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2538): Flush status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:31,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 
2024-12-11T04:28:31,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=149 2024-12-11T04:28:31,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=149 2024-12-11T04:28:31,590 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=149, resume processing ppid=148 2024-12-11T04:28:31,590 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, ppid=148, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3840 sec 2024-12-11T04:28:31,591 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees in 1.3880 sec 2024-12-11T04:28:31,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on c171d7ccfa412c571490e92799f7df2c 2024-12-11T04:28:31,643 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c171d7ccfa412c571490e92799f7df2c 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-11T04:28:31,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=A 2024-12-11T04:28:31,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:31,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=B 2024-12-11T04:28:31,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:31,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=C 2024-12-11T04:28:31,643 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:31,647 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/2753f9a15ca444bb80206a319a375a7c is 50, key is test_row_0/A:col10/1733891311641/Put/seqid=0 2024-12-11T04:28:31,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742348_1524 (size=14541) 2024-12-11T04:28:31,652 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/2753f9a15ca444bb80206a319a375a7c 2024-12-11T04:28:31,654 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:31,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891371651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:31,659 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/b9845fc231d348dba09401b55c91142c is 50, key is test_row_0/B:col10/1733891311641/Put/seqid=0 2024-12-11T04:28:31,659 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:31,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891371653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:31,661 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:31,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891371654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:31,662 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:31,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891371654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:31,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742349_1525 (size=12151) 2024-12-11T04:28:31,759 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:31,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891371755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:31,765 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:31,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891371760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:31,767 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:31,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891371763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:31,767 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:31,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891371763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:31,963 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:31,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891371960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:31,970 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:31,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891371966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:31,970 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:31,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891371968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:31,972 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:31,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891371969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:32,064 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/b9845fc231d348dba09401b55c91142c 2024-12-11T04:28:32,071 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/b41d85bae2e64bc989e52128e9c6d118 is 50, key is test_row_0/C:col10/1733891311641/Put/seqid=0 2024-12-11T04:28:32,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742350_1526 (size=12151) 2024-12-11T04:28:32,082 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/b41d85bae2e64bc989e52128e9c6d118 2024-12-11T04:28:32,086 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/2753f9a15ca444bb80206a319a375a7c as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/2753f9a15ca444bb80206a319a375a7c 2024-12-11T04:28:32,090 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/2753f9a15ca444bb80206a319a375a7c, entries=200, sequenceid=237, filesize=14.2 K 2024-12-11T04:28:32,090 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/b9845fc231d348dba09401b55c91142c as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/b9845fc231d348dba09401b55c91142c 2024-12-11T04:28:32,093 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/b9845fc231d348dba09401b55c91142c, entries=150, sequenceid=237, filesize=11.9 K 2024-12-11T04:28:32,094 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/b41d85bae2e64bc989e52128e9c6d118 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/b41d85bae2e64bc989e52128e9c6d118 2024-12-11T04:28:32,098 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/b41d85bae2e64bc989e52128e9c6d118, entries=150, sequenceid=237, filesize=11.9 K 2024-12-11T04:28:32,098 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for c171d7ccfa412c571490e92799f7df2c in 455ms, sequenceid=237, compaction requested=true 2024-12-11T04:28:32,098 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:32,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c171d7ccfa412c571490e92799f7df2c:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:28:32,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:32,099 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T04:28:32,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c171d7ccfa412c571490e92799f7df2c:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:28:32,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:32,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c171d7ccfa412c571490e92799f7df2c:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:28:32,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:28:32,099 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 
store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T04:28:32,100 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T04:28:32,100 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 53794 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T04:28:32,100 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): c171d7ccfa412c571490e92799f7df2c/B is initiating minor compaction (all files) 2024-12-11T04:28:32,100 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): c171d7ccfa412c571490e92799f7df2c/A is initiating minor compaction (all files) 2024-12-11T04:28:32,100 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c171d7ccfa412c571490e92799f7df2c/A in TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:32,100 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c171d7ccfa412c571490e92799f7df2c/B in TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:32,100 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/e2a9b215d467461abae154b38f946cb7, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/8beec5f9d12a4746b59447f72bd7a3af, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/c23a838bbf634a9f85e4083528b1ce8e, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/2753f9a15ca444bb80206a319a375a7c] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp, totalSize=52.5 K 2024-12-11T04:28:32,100 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/b464acbbd0e84a0f9cdbc924be8dd7f9, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/e989927c02324740b92c7d4b282de356, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/c71932644de04a919e2854dfc52043af, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/b9845fc231d348dba09401b55c91142c] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp, totalSize=47.9 K 2024-12-11T04:28:32,100 DEBUG 
[RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting b464acbbd0e84a0f9cdbc924be8dd7f9, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1733891308040 2024-12-11T04:28:32,100 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting e2a9b215d467461abae154b38f946cb7, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1733891308040 2024-12-11T04:28:32,100 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting e989927c02324740b92c7d4b282de356, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1733891309544 2024-12-11T04:28:32,100 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8beec5f9d12a4746b59447f72bd7a3af, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1733891309544 2024-12-11T04:28:32,101 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting c71932644de04a919e2854dfc52043af, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733891310169 2024-12-11T04:28:32,101 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting c23a838bbf634a9f85e4083528b1ce8e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733891310169 2024-12-11T04:28:32,101 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting b9845fc231d348dba09401b55c91142c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1733891311325 2024-12-11T04:28:32,101 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2753f9a15ca444bb80206a319a375a7c, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1733891311325 2024-12-11T04:28:32,110 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c171d7ccfa412c571490e92799f7df2c#B#compaction#447 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:32,111 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/3fe1fe5356294be9a845e21a6577c308 is 50, key is test_row_0/B:col10/1733891311641/Put/seqid=0 2024-12-11T04:28:32,122 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c171d7ccfa412c571490e92799f7df2c#A#compaction#448 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:32,123 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/224f777bdf154badb492e03d23fd40f2 is 50, key is test_row_0/A:col10/1733891311641/Put/seqid=0 2024-12-11T04:28:32,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742351_1527 (size=12697) 2024-12-11T04:28:32,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742352_1528 (size=12697) 2024-12-11T04:28:32,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on c171d7ccfa412c571490e92799f7df2c 2024-12-11T04:28:32,234 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c171d7ccfa412c571490e92799f7df2c 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-11T04:28:32,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=A 2024-12-11T04:28:32,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:32,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=B 2024-12-11T04:28:32,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:32,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=C 2024-12-11T04:28:32,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:32,238 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/0dcda12cc299463d9d4a39d1a0641eb7 is 50, key is test_row_0/A:col10/1733891311650/Put/seqid=0 2024-12-11T04:28:32,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742353_1529 (size=14541) 2024-12-11T04:28:32,303 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:32,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891372292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:32,308 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:32,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891372300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:32,308 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:32,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891372301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:32,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-11T04:28:32,308 INFO [Thread-2131 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 148 completed 2024-12-11T04:28:32,310 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:28:32,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=150, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=150, table=TestAcidGuarantees 2024-12-11T04:28:32,311 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:32,311 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=150, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=150, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:28:32,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891372302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:32,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-11T04:28:32,311 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:32,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891372303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:32,311 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=150, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=150, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:28:32,311 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:28:32,406 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:32,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891372404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:32,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-11T04:28:32,412 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:32,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891372409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:32,413 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:32,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891372409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:32,416 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:32,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891372412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:32,416 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:32,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891372412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:32,463 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:32,464 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-12-11T04:28:32,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:32,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:32,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:32,464 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] handler.RSProcedureHandler(58): pid=151 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:32,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=151 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:32,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=151 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:32,558 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/3fe1fe5356294be9a845e21a6577c308 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/3fe1fe5356294be9a845e21a6577c308 2024-12-11T04:28:32,560 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/224f777bdf154badb492e03d23fd40f2 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/224f777bdf154badb492e03d23fd40f2 2024-12-11T04:28:32,563 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c171d7ccfa412c571490e92799f7df2c/B of c171d7ccfa412c571490e92799f7df2c into 3fe1fe5356294be9a845e21a6577c308(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
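The "Over memstore limit=512.0 K" rejections repeated throughout this log come from HRegion.checkResources(), which blocks new mutations once a region's memstore passes its blocking threshold (the flush size times the block multiplier) until the in-progress flush frees space. A minimal sketch of how such a small threshold can be configured via standard HBase site properties; the 128 KB flush size and 4x multiplier below are illustrative assumptions chosen to reproduce a 512 KB limit, not values read from this test's actual configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreLimit {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Flush a region's memstore once it reaches 128 KB (the production default is 128 MB).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);

        // Writes are rejected with RegionTooBusyException once the memstore reaches
        // flush.size * multiplier; 128 KB * 4 = 512 KB matches the limit reported above.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("blocking memstore limit = " + (blockingLimit / 1024) + " KB");
    }
}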
2024-12-11T04:28:32,563 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:32,563 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c., storeName=c171d7ccfa412c571490e92799f7df2c/B, priority=12, startTime=1733891312099; duration=0sec 2024-12-11T04:28:32,564 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:28:32,564 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c171d7ccfa412c571490e92799f7df2c:B 2024-12-11T04:28:32,564 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T04:28:32,565 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c171d7ccfa412c571490e92799f7df2c/A of c171d7ccfa412c571490e92799f7df2c into 224f777bdf154badb492e03d23fd40f2(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:28:32,565 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:32,565 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c., storeName=c171d7ccfa412c571490e92799f7df2c/A, priority=12, startTime=1733891312098; duration=0sec 2024-12-11T04:28:32,565 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:32,565 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c171d7ccfa412c571490e92799f7df2c:A 2024-12-11T04:28:32,566 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T04:28:32,566 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): c171d7ccfa412c571490e92799f7df2c/C is initiating minor compaction (all files) 2024-12-11T04:28:32,566 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c171d7ccfa412c571490e92799f7df2c/C in TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 
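On the client side these RegionTooBusyException responses are transient: the writer threads in this test keep retrying until a flush or compaction brings the memstore back under the limit. A rough sketch of a writer loop that backs off on this exception, assuming the stock HBase 2.x Java client; the table name matches this test, but the column family, row, retry counts, and the reduced built-in retry setting are illustrative assumptions (depending on client retry configuration, the exception may also arrive wrapped in a retries-exhausted error):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffWriter {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Keep the client from retrying internally for long, so the busy signal reaches this loop.
        conf.setInt("hbase.client.retries.number", 1);

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            long backoffMs = 100;
            for (int attempt = 0; attempt < 10; attempt++) {
                try {
                    table.put(put);          // rejected while the region is over its memstore limit
                    return;                  // write accepted
                } catch (IOException e) {
                    boolean tooBusy = e instanceof RegionTooBusyException
                        || e.getCause() instanceof RegionTooBusyException;
                    if (!tooBusy) {
                        throw e;             // some other failure; do not retry here
                    }
                    Thread.sleep(backoffMs); // wait for a flush to free memstore space
                    backoffMs = Math.min(backoffMs * 2, 5_000);
                }
            }
            throw new IOException("region still too busy after retries");
        }
    }
}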
2024-12-11T04:28:32,566 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/350f4459821a4110960c859cfc1af35e, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/db8bf3da5c8c476da6971ff255de19f8, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/0525998891e14a9eaa8ceeef0254bd6f, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/b41d85bae2e64bc989e52128e9c6d118] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp, totalSize=47.9 K 2024-12-11T04:28:32,566 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 350f4459821a4110960c859cfc1af35e, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1733891308040 2024-12-11T04:28:32,567 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting db8bf3da5c8c476da6971ff255de19f8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1733891309544 2024-12-11T04:28:32,567 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 0525998891e14a9eaa8ceeef0254bd6f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1733891310169 2024-12-11T04:28:32,567 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting b41d85bae2e64bc989e52128e9c6d118, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1733891311325 2024-12-11T04:28:32,575 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c171d7ccfa412c571490e92799f7df2c#C#compaction#450 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:32,575 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/2a769496e32848c59434ed32a46f6273 is 50, key is test_row_0/C:col10/1733891311641/Put/seqid=0 2024-12-11T04:28:32,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742354_1530 (size=12697) 2024-12-11T04:28:32,610 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:32,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891372608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:32,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-11T04:28:32,616 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:32,616 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-12-11T04:28:32,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:32,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:32,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:32,617 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] handler.RSProcedureHandler(58): pid=151 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:32,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=151 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:32,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=151 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:32,618 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:32,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891372614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:32,618 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:32,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891372614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:32,623 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:32,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891372617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:32,623 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:32,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891372618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:32,643 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/0dcda12cc299463d9d4a39d1a0641eb7 2024-12-11T04:28:32,649 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/68197d217bc046098db985188da56474 is 50, key is test_row_0/B:col10/1733891311650/Put/seqid=0 2024-12-11T04:28:32,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742355_1531 (size=12151) 2024-12-11T04:28:32,769 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:32,769 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-12-11T04:28:32,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:32,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:32,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 
2024-12-11T04:28:32,770 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] handler.RSProcedureHandler(58): pid=151 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:32,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=151 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:32,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=151 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:32,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-11T04:28:32,915 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:32,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891372912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:32,920 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:32,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891372919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:32,921 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:32,922 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-12-11T04:28:32,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 
2024-12-11T04:28:32,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:32,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:32,922 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] handler.RSProcedureHandler(58): pid=151 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:32,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=151 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:28:32,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=151 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:32,925 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:32,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891372921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:32,926 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:32,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891372924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:32,927 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:32,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891372926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:32,983 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/2a769496e32848c59434ed32a46f6273 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/2a769496e32848c59434ed32a46f6273 2024-12-11T04:28:32,986 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c171d7ccfa412c571490e92799f7df2c/C of c171d7ccfa412c571490e92799f7df2c into 2a769496e32848c59434ed32a46f6273(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T04:28:32,986 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:32,986 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c., storeName=c171d7ccfa412c571490e92799f7df2c/C, priority=12, startTime=1733891312099; duration=0sec 2024-12-11T04:28:32,986 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:32,986 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c171d7ccfa412c571490e92799f7df2c:C 2024-12-11T04:28:33,052 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/68197d217bc046098db985188da56474 2024-12-11T04:28:33,058 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/aedf5cbbc42f40518ce67aaaefa1f62e is 50, key is test_row_0/C:col10/1733891311650/Put/seqid=0 2024-12-11T04:28:33,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742356_1532 (size=12151) 2024-12-11T04:28:33,074 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:33,074 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-12-11T04:28:33,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:33,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:33,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:33,074 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] handler.RSProcedureHandler(58): pid=151 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:33,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=151 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:33,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=151 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:33,226 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:33,227 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-12-11T04:28:33,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:33,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:33,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:33,227 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] handler.RSProcedureHandler(58): pid=151 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:33,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=151 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:33,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=151 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:33,379 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:33,379 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-12-11T04:28:33,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:33,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:33,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:33,380 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] handler.RSProcedureHandler(58): pid=151 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:33,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=151 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:33,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=151 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:33,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-11T04:28:33,422 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:33,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891373421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:33,428 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:33,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891373424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:33,432 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:33,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891373428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:33,432 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:33,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891373429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:33,432 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:33,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891373429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:33,462 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/aedf5cbbc42f40518ce67aaaefa1f62e 2024-12-11T04:28:33,466 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/0dcda12cc299463d9d4a39d1a0641eb7 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/0dcda12cc299463d9d4a39d1a0641eb7 2024-12-11T04:28:33,470 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/0dcda12cc299463d9d4a39d1a0641eb7, entries=200, sequenceid=248, filesize=14.2 K 2024-12-11T04:28:33,470 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/68197d217bc046098db985188da56474 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/68197d217bc046098db985188da56474 2024-12-11T04:28:33,473 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/68197d217bc046098db985188da56474, entries=150, sequenceid=248, filesize=11.9 K 2024-12-11T04:28:33,474 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/aedf5cbbc42f40518ce67aaaefa1f62e as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/aedf5cbbc42f40518ce67aaaefa1f62e 2024-12-11T04:28:33,477 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/aedf5cbbc42f40518ce67aaaefa1f62e, entries=150, sequenceid=248, filesize=11.9 K 2024-12-11T04:28:33,478 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for c171d7ccfa412c571490e92799f7df2c in 1243ms, sequenceid=248, compaction requested=false 2024-12-11T04:28:33,478 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:33,532 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:33,532 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-12-11T04:28:33,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 
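The repeated RegionTooBusyException entries above are the region server refusing new Mutate calls while the memstore of region c171d7ccfa412c571490e92799f7df2c is over its blocking threshold; they stop once the MemStoreFlusher flush frees space. In stock HBase that threshold is derived from the configured flush size and block multiplier, and the unusually small 512.0 K limit in this log reflects the test's deliberately small flush size rather than production defaults. The following is a minimal sketch of how the threshold is derived from configuration; the fallback values passed to getLong are assumptions and are not read from this test's actual configuration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreBlockingLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Per-region flush trigger, and the multiplier applied to it before writes are blocked.
        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024); // assumed default
        long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);          // assumed default
        // Once a region's memstore grows past roughly flushSize * multiplier, puts are rejected
        // with RegionTooBusyException until an in-progress flush brings the size back down.
        System.out.println("approx. blocking memstore size = " + (flushSize * multiplier) + " bytes");
      }
    }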
2024-12-11T04:28:33,533 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2837): Flushing c171d7ccfa412c571490e92799f7df2c 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-11T04:28:33,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=A 2024-12-11T04:28:33,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:33,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=B 2024-12-11T04:28:33,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:33,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=C 2024-12-11T04:28:33,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:33,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/cd288610dc4548e28b1400c7ee0c27ec is 50, key is test_row_0/A:col10/1733891312301/Put/seqid=0 2024-12-11T04:28:33,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742357_1533 (size=12301) 2024-12-11T04:28:33,541 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/cd288610dc4548e28b1400c7ee0c27ec 2024-12-11T04:28:33,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/5f52cef444d84371baaf16ae5d22fda9 is 50, key is test_row_0/B:col10/1733891312301/Put/seqid=0 2024-12-11T04:28:33,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742358_1534 (size=12301) 2024-12-11T04:28:33,551 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=276 (bloomFilter=true), 
to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/5f52cef444d84371baaf16ae5d22fda9 2024-12-11T04:28:33,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/f8b2eb2b6dc94b0da9b78103683ae6a6 is 50, key is test_row_0/C:col10/1733891312301/Put/seqid=0 2024-12-11T04:28:33,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742359_1535 (size=12301) 2024-12-11T04:28:33,962 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/f8b2eb2b6dc94b0da9b78103683ae6a6 2024-12-11T04:28:33,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/cd288610dc4548e28b1400c7ee0c27ec as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/cd288610dc4548e28b1400c7ee0c27ec 2024-12-11T04:28:33,970 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/cd288610dc4548e28b1400c7ee0c27ec, entries=150, sequenceid=276, filesize=12.0 K 2024-12-11T04:28:33,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/5f52cef444d84371baaf16ae5d22fda9 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/5f52cef444d84371baaf16ae5d22fda9 2024-12-11T04:28:33,973 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/5f52cef444d84371baaf16ae5d22fda9, entries=150, sequenceid=276, filesize=12.0 K 2024-12-11T04:28:33,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/f8b2eb2b6dc94b0da9b78103683ae6a6 as 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/f8b2eb2b6dc94b0da9b78103683ae6a6 2024-12-11T04:28:33,977 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/f8b2eb2b6dc94b0da9b78103683ae6a6, entries=150, sequenceid=276, filesize=12.0 K 2024-12-11T04:28:33,978 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=0 B/0 for c171d7ccfa412c571490e92799f7df2c in 446ms, sequenceid=276, compaction requested=true 2024-12-11T04:28:33,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2538): Flush status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:33,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:33,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=151 2024-12-11T04:28:33,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=151 2024-12-11T04:28:33,980 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=151, resume processing ppid=150 2024-12-11T04:28:33,980 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, ppid=150, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6680 sec 2024-12-11T04:28:33,982 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=150, table=TestAcidGuarantees in 1.6710 sec 2024-12-11T04:28:34,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-11T04:28:34,414 INFO [Thread-2131 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 150 completed 2024-12-11T04:28:34,416 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:28:34,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=152, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=152, table=TestAcidGuarantees 2024-12-11T04:28:34,417 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=152, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=152, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:28:34,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-11T04:28:34,418 INFO [PEWorker-5 {}] 
procedure.FlushTableProcedure(91): pid=152, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=152, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:28:34,418 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=153, ppid=152, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:28:34,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on c171d7ccfa412c571490e92799f7df2c 2024-12-11T04:28:34,442 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c171d7ccfa412c571490e92799f7df2c 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-11T04:28:34,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=A 2024-12-11T04:28:34,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:34,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=B 2024-12-11T04:28:34,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:34,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=C 2024-12-11T04:28:34,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:34,446 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/e19231da05cc4daa90b87226803b637f is 50, key is test_row_0/A:col10/1733891314441/Put/seqid=0 2024-12-11T04:28:34,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742360_1536 (size=19621) 2024-12-11T04:28:34,461 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/e19231da05cc4daa90b87226803b637f 2024-12-11T04:28:34,468 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/cd87fa4a85214ce4ad5302a73c21398f is 50, key is test_row_0/B:col10/1733891314441/Put/seqid=0 2024-12-11T04:28:34,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742361_1537 (size=12301) 2024-12-11T04:28:34,478 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:34,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891374469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:34,479 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:34,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891374470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:34,479 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:34,479 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:34,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891374474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:34,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891374474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:34,484 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:34,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891374479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:34,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-11T04:28:34,570 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:34,570 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-12-11T04:28:34,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:34,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:34,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:34,570 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] handler.RSProcedureHandler(58): pid=153 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
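The ERROR just above is the remote FlushRegionCallable for pid=153 declining to run because the region is already flushing (the memstore-pressure flush started at 04:28:34,442); the entries that follow show the result being reported back and the master marking the remote procedure as failed before re-dispatching it, as the later pid=153 attempts in this log show. The flush request itself is the client-driven one recorded as "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" (procId 152). Below is a minimal sketch of issuing such a flush through the public Admin API; the connection setup is an assumption and is not taken from the test code.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Triggers a master-side FlushTableProcedure with one FlushRegionProcedure per region;
          // a subprocedure that lands on an already-flushing region fails and is retried.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }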
2024-12-11T04:28:34,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=153 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:34,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=153 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:34,583 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:34,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891374580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:34,584 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:34,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891374580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:34,585 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:34,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891374580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:34,585 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:34,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891374580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:34,588 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:34,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891374585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:34,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-11T04:28:34,722 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:34,723 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-12-11T04:28:34,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:34,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:34,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:34,723 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] handler.RSProcedureHandler(58): pid=153 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
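From the client's side, each of these rejected Mutate calls surfaces as a RegionTooBusyException (possibly wrapped by the client's own retry machinery) and is safe to retry once the flush completes. The sketch below shows an illustrative manual backoff loop; the table name, row key, and column layout match the test rows seen in this log, but the retry budget, backoff values, and cell value are assumptions.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPutSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value")); // assumed value
          long pauseMs = 100;                         // assumed starting backoff
          for (int attempt = 0; attempt < 5; attempt++) {
            try {
              table.put(put);                         // rejected while the memstore is over its limit
              break;
            } catch (IOException e) {                 // RegionTooBusyException is a retryable IOException
              if (attempt == 4) {
                throw e;                              // give up after the assumed retry budget
              }
              Thread.sleep(pauseMs);
              pauseMs *= 2;                           // exponential backoff before the next attempt
            }
          }
        }
      }
    }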
2024-12-11T04:28:34,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=153 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:34,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=153 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:34,788 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:34,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891374784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:34,788 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:34,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891374785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:34,794 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:34,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891374787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:34,794 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:34,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891374787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:34,795 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:34,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891374789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:34,872 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/cd87fa4a85214ce4ad5302a73c21398f 2024-12-11T04:28:34,875 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:34,876 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-12-11T04:28:34,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:34,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:34,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:34,876 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] handler.RSProcedureHandler(58): pid=153 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:34,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=153 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:34,879 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/8776c16f2a4446b8ab62baf097fa5a64 is 50, key is test_row_0/C:col10/1733891314441/Put/seqid=0 2024-12-11T04:28:34,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=153 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:34,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742362_1538 (size=12301) 2024-12-11T04:28:34,884 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/8776c16f2a4446b8ab62baf097fa5a64 2024-12-11T04:28:34,887 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/e19231da05cc4daa90b87226803b637f as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/e19231da05cc4daa90b87226803b637f 2024-12-11T04:28:34,890 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/e19231da05cc4daa90b87226803b637f, entries=300, sequenceid=287, filesize=19.2 K 2024-12-11T04:28:34,891 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/cd87fa4a85214ce4ad5302a73c21398f as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/cd87fa4a85214ce4ad5302a73c21398f 2024-12-11T04:28:34,894 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/cd87fa4a85214ce4ad5302a73c21398f, entries=150, sequenceid=287, filesize=12.0 K 2024-12-11T04:28:34,894 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/8776c16f2a4446b8ab62baf097fa5a64 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/8776c16f2a4446b8ab62baf097fa5a64 2024-12-11T04:28:34,897 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/8776c16f2a4446b8ab62baf097fa5a64, entries=150, sequenceid=287, filesize=12.0 K 2024-12-11T04:28:34,898 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for c171d7ccfa412c571490e92799f7df2c in 456ms, sequenceid=287, compaction requested=true 2024-12-11T04:28:34,898 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:34,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c171d7ccfa412c571490e92799f7df2c:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:28:34,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:34,898 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T04:28:34,898 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T04:28:34,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c171d7ccfa412c571490e92799f7df2c:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:28:34,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:34,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c171d7ccfa412c571490e92799f7df2c:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:28:34,898 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:28:34,899 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49450 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T04:28:34,899 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): c171d7ccfa412c571490e92799f7df2c/B is initiating minor 
compaction (all files) 2024-12-11T04:28:34,899 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 59160 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T04:28:34,900 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): c171d7ccfa412c571490e92799f7df2c/A is initiating minor compaction (all files) 2024-12-11T04:28:34,900 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c171d7ccfa412c571490e92799f7df2c/B in TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:34,900 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c171d7ccfa412c571490e92799f7df2c/A in TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:34,900 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/3fe1fe5356294be9a845e21a6577c308, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/68197d217bc046098db985188da56474, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/5f52cef444d84371baaf16ae5d22fda9, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/cd87fa4a85214ce4ad5302a73c21398f] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp, totalSize=48.3 K 2024-12-11T04:28:34,900 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/224f777bdf154badb492e03d23fd40f2, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/0dcda12cc299463d9d4a39d1a0641eb7, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/cd288610dc4548e28b1400c7ee0c27ec, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/e19231da05cc4daa90b87226803b637f] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp, totalSize=57.8 K 2024-12-11T04:28:34,900 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 224f777bdf154badb492e03d23fd40f2, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1733891311325 2024-12-11T04:28:34,900 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 3fe1fe5356294be9a845e21a6577c308, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=237, 
earliestPutTs=1733891311325 2024-12-11T04:28:34,900 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 68197d217bc046098db985188da56474, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1733891311650 2024-12-11T04:28:34,900 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0dcda12cc299463d9d4a39d1a0641eb7, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1733891311650 2024-12-11T04:28:34,901 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 5f52cef444d84371baaf16ae5d22fda9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1733891312300 2024-12-11T04:28:34,901 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting cd288610dc4548e28b1400c7ee0c27ec, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1733891312300 2024-12-11T04:28:34,901 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting cd87fa4a85214ce4ad5302a73c21398f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1733891314436 2024-12-11T04:28:34,901 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting e19231da05cc4daa90b87226803b637f, keycount=300, bloomtype=ROW, size=19.2 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1733891314434 2024-12-11T04:28:34,908 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c171d7ccfa412c571490e92799f7df2c#B#compaction#459 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:34,908 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/1b45ce39dfd946c38011dd8c4be14934 is 50, key is test_row_0/B:col10/1733891314441/Put/seqid=0 2024-12-11T04:28:34,909 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c171d7ccfa412c571490e92799f7df2c#A#compaction#460 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:34,910 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/cb5652319b094c039ee17babb5ee8df0 is 50, key is test_row_0/A:col10/1733891314441/Put/seqid=0 2024-12-11T04:28:34,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742363_1539 (size=12983) 2024-12-11T04:28:34,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742364_1540 (size=12983) 2024-12-11T04:28:34,925 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/1b45ce39dfd946c38011dd8c4be14934 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/1b45ce39dfd946c38011dd8c4be14934 2024-12-11T04:28:34,929 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c171d7ccfa412c571490e92799f7df2c/B of c171d7ccfa412c571490e92799f7df2c into 1b45ce39dfd946c38011dd8c4be14934(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:28:34,929 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:34,929 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c., storeName=c171d7ccfa412c571490e92799f7df2c/B, priority=12, startTime=1733891314898; duration=0sec 2024-12-11T04:28:34,929 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:28:34,929 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c171d7ccfa412c571490e92799f7df2c:B 2024-12-11T04:28:34,929 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T04:28:34,930 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49450 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T04:28:34,930 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): c171d7ccfa412c571490e92799f7df2c/C is initiating minor compaction (all files) 2024-12-11T04:28:34,930 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c171d7ccfa412c571490e92799f7df2c/C in TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 
2024-12-11T04:28:34,930 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/2a769496e32848c59434ed32a46f6273, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/aedf5cbbc42f40518ce67aaaefa1f62e, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/f8b2eb2b6dc94b0da9b78103683ae6a6, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/8776c16f2a4446b8ab62baf097fa5a64] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp, totalSize=48.3 K 2024-12-11T04:28:34,932 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 2a769496e32848c59434ed32a46f6273, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1733891311325 2024-12-11T04:28:34,932 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting aedf5cbbc42f40518ce67aaaefa1f62e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1733891311650 2024-12-11T04:28:34,932 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting f8b2eb2b6dc94b0da9b78103683ae6a6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1733891312300 2024-12-11T04:28:34,932 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 8776c16f2a4446b8ab62baf097fa5a64, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1733891314436 2024-12-11T04:28:34,939 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c171d7ccfa412c571490e92799f7df2c#C#compaction#461 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:34,939 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/ec064d0c2e3c420a97f2287fcca69fe0 is 50, key is test_row_0/C:col10/1733891314441/Put/seqid=0 2024-12-11T04:28:34,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742365_1541 (size=12983) 2024-12-11T04:28:35,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-11T04:28:35,031 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:35,032 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-12-11T04:28:35,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:35,032 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2837): Flushing c171d7ccfa412c571490e92799f7df2c 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-11T04:28:35,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=A 2024-12-11T04:28:35,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:35,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=B 2024-12-11T04:28:35,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:35,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=C 2024-12-11T04:28:35,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:35,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/cf07d7463f3d4eb0a617ba37bc3bf2dc is 50, key is test_row_0/A:col10/1733891314477/Put/seqid=0 2024-12-11T04:28:35,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742366_1542 
(size=12301) 2024-12-11T04:28:35,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on c171d7ccfa412c571490e92799f7df2c 2024-12-11T04:28:35,095 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:35,106 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:35,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891375101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:35,108 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:35,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891375101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:35,108 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:35,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891375102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:35,108 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:35,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891375103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:35,108 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:35,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891375103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:35,211 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:35,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891375207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:35,211 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:35,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891375208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:35,211 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:35,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891375209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:35,211 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:35,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891375209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:35,211 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:35,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891375209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:35,324 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/cb5652319b094c039ee17babb5ee8df0 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/cb5652319b094c039ee17babb5ee8df0 2024-12-11T04:28:35,328 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c171d7ccfa412c571490e92799f7df2c/A of c171d7ccfa412c571490e92799f7df2c into cb5652319b094c039ee17babb5ee8df0(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:28:35,328 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:35,328 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c., storeName=c171d7ccfa412c571490e92799f7df2c/A, priority=12, startTime=1733891314898; duration=0sec 2024-12-11T04:28:35,329 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:35,329 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c171d7ccfa412c571490e92799f7df2c:A 2024-12-11T04:28:35,346 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/ec064d0c2e3c420a97f2287fcca69fe0 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/ec064d0c2e3c420a97f2287fcca69fe0 2024-12-11T04:28:35,350 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in c171d7ccfa412c571490e92799f7df2c/C of c171d7ccfa412c571490e92799f7df2c into ec064d0c2e3c420a97f2287fcca69fe0(size=12.7 K), total size for store is 12.7 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:28:35,350 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:35,350 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c., storeName=c171d7ccfa412c571490e92799f7df2c/C, priority=12, startTime=1733891314898; duration=0sec 2024-12-11T04:28:35,350 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:35,350 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c171d7ccfa412c571490e92799f7df2c:C 2024-12-11T04:28:35,415 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:35,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891375412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:35,415 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:35,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891375413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:35,416 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:35,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891375413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:35,416 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:35,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891375413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:35,417 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:35,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891375415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:35,441 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=313 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/cf07d7463f3d4eb0a617ba37bc3bf2dc 2024-12-11T04:28:35,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/4e016c705d96478399ec98dbe0ba9e64 is 50, key is test_row_0/B:col10/1733891314477/Put/seqid=0 2024-12-11T04:28:35,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742367_1543 (size=12301) 2024-12-11T04:28:35,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-11T04:28:35,719 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:35,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891375716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:35,720 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:35,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891375717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:35,720 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:35,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891375718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:35,720 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:35,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891375718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:35,723 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:35,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891375718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:35,852 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=313 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/4e016c705d96478399ec98dbe0ba9e64 2024-12-11T04:28:35,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/772e6cc729a64ae59d11803badfc1fe9 is 50, key is test_row_0/C:col10/1733891314477/Put/seqid=0 2024-12-11T04:28:35,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742368_1544 (size=12301) 2024-12-11T04:28:35,863 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=313 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/772e6cc729a64ae59d11803badfc1fe9 2024-12-11T04:28:35,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/cf07d7463f3d4eb0a617ba37bc3bf2dc as 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/cf07d7463f3d4eb0a617ba37bc3bf2dc
2024-12-11T04:28:35,870 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/cf07d7463f3d4eb0a617ba37bc3bf2dc, entries=150, sequenceid=313, filesize=12.0 K
2024-12-11T04:28:35,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/4e016c705d96478399ec98dbe0ba9e64 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/4e016c705d96478399ec98dbe0ba9e64
2024-12-11T04:28:35,874 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/4e016c705d96478399ec98dbe0ba9e64, entries=150, sequenceid=313, filesize=12.0 K
2024-12-11T04:28:35,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/772e6cc729a64ae59d11803badfc1fe9 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/772e6cc729a64ae59d11803badfc1fe9
2024-12-11T04:28:35,879 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/772e6cc729a64ae59d11803badfc1fe9, entries=150, sequenceid=313, filesize=12.0 K
2024-12-11T04:28:35,880 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for c171d7ccfa412c571490e92799f7df2c in 847ms, sequenceid=313, compaction requested=false
2024-12-11T04:28:35,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2538): Flush status journal for c171d7ccfa412c571490e92799f7df2c:
2024-12-11T04:28:35,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.
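The repeated RegionTooBusyException warnings above are the region server's write backpressure: once the memstore of c171d7ccfa412c571490e92799f7df2c exceeds the blocking limit reported in the message (512.0 K under this test's configuration, typically derived from the flush size and hbase.hregion.memstore.block.multiplier), HRegion.checkResources rejects incoming Mutate calls until a flush like the one logged here completes and frees the memstore. The sketch below shows one way a client could react to that signal; it is not part of the test harness, the cell value and the retry/backoff numbers are made up, and the stock HBase client normally retries this exception on its own.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Illustrative only: shrink the client's built-in retrying so the busy-region
    // error reaches the loop below instead of being absorbed internally.
    conf.setInt("hbase.client.retries.number", 1);
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Same row/family/qualifier shape the test writes; the value is invented.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoffMs = 100;                       // illustrative starting backoff
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);
          break;                                  // write accepted
        } catch (IOException e) {
          // The server-side rejection logged above (HRegion.checkResources) reaches
          // the client as a RegionTooBusyException, sometimes wrapped by the caller.
          boolean busy = e instanceof RegionTooBusyException
              || e.getCause() instanceof RegionTooBusyException;
          if (!busy || attempt == 5) {
            throw e;                              // not backpressure, or out of attempts
          }
          Thread.sleep(backoffMs);                // let the in-flight flush drain the memstore
          backoffMs *= 2;
        }
      }
    }
  }
}

In the log itself the same thing happens implicitly: the writers keep failing with "Over memstore limit=512.0 K" until the ~147.60 KB flush above lands, then the cycle starts again as the memstore refills.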
2024-12-11T04:28:35,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=153 2024-12-11T04:28:35,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=153 2024-12-11T04:28:35,883 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=153, resume processing ppid=152 2024-12-11T04:28:35,883 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, ppid=152, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4630 sec 2024-12-11T04:28:35,884 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=152, table=TestAcidGuarantees in 1.4670 sec 2024-12-11T04:28:36,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on c171d7ccfa412c571490e92799f7df2c 2024-12-11T04:28:36,226 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c171d7ccfa412c571490e92799f7df2c 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-11T04:28:36,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=A 2024-12-11T04:28:36,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:36,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=B 2024-12-11T04:28:36,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:36,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=C 2024-12-11T04:28:36,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:36,231 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/5612d87b735c4590819278073744e492 is 50, key is test_row_0/A:col10/1733891315101/Put/seqid=0 2024-12-11T04:28:36,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742369_1545 (size=14741) 2024-12-11T04:28:36,257 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:36,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891376252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:36,261 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:36,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891376253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:36,261 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:36,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891376253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:36,261 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:36,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891376254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:36,262 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:36,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891376254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:36,362 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:36,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891376358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:36,364 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:36,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891376362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:36,364 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:36,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891376362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:36,368 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:36,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891376362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:36,368 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:36,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891376362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:36,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-11T04:28:36,522 INFO [Thread-2131 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 152 completed 2024-12-11T04:28:36,523 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:28:36,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=154, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=154, table=TestAcidGuarantees 2024-12-11T04:28:36,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-12-11T04:28:36,524 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=154, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=154, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:28:36,525 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=154, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=154, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:28:36,525 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=155, ppid=154, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:28:36,566 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy 
due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:36,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891376563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:36,568 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:36,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891376565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:36,568 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:36,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891376565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:36,571 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:36,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891376569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:36,573 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:28:36,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891376569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:28:36,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154
2024-12-11T04:28:36,635 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/5612d87b735c4590819278073744e492
2024-12-11T04:28:36,642 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/7f1304bb30a34364b4ab1420998a6272 is 50, key is test_row_0/B:col10/1733891315101/Put/seqid=0
2024-12-11T04:28:36,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742370_1546 (size=12301)
2024-12-11T04:28:36,676 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267
2024-12-11T04:28:36,677 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=155
2024-12-11T04:28:36,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.
2024-12-11T04:28:36,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing
2024-12-11T04:28:36,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.
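For context on the procedure traffic around pid=152/154/155: the flushes are driven from the test client through Admin (the "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 152 completed" line above), the master stores a FlushTableProcedure, and that fans out FlushRegionProcedure subprocedures to the region server (pid=153 under 152, pid=155 under 154 here). The server declines pid=155 with "NOT flushing ... as already flushing" because MemStoreFlusher.0 is still writing the previous snapshot, so the callable fails and the master re-dispatches it, which is what the error and retry entries that follow show. A minimal sketch of the client-side call that starts this chain, assuming a cluster configuration on the classpath (this is not the test's actual harness code):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Submits a FlushTableProcedure on the master; each region then runs a
      // FlushRegionProcedure, which is the pid/ppid pairing visible in this log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}

The call returns once the master reports the procedure done, which is why the client keeps polling "Checking to see if procedure is done pid=154" above and only logs the FLUSH operation as completed afterwards.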
2024-12-11T04:28:36,677 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] handler.RSProcedureHandler(58): pid=155 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:36,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=155 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:36,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=155 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:36,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-12-11T04:28:36,829 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:36,830 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=155 2024-12-11T04:28:36,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:36,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:36,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:36,830 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] handler.RSProcedureHandler(58): pid=155 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:36,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=155 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:36,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=155 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:36,871 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:36,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891376868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:36,872 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:36,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891376869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:36,872 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:36,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891376870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:36,878 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:36,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891376874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:36,879 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:36,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891376875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:36,982 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:36,982 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=155 2024-12-11T04:28:36,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:36,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:36,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:36,983 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] handler.RSProcedureHandler(58): pid=155 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:36,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=155 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:36,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=155 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:37,046 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/7f1304bb30a34364b4ab1420998a6272 2024-12-11T04:28:37,053 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/32ccfed076c24929ae68292f851e7475 is 50, key is test_row_0/C:col10/1733891315101/Put/seqid=0 2024-12-11T04:28:37,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742371_1547 (size=12301) 2024-12-11T04:28:37,056 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/32ccfed076c24929ae68292f851e7475 2024-12-11T04:28:37,059 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/5612d87b735c4590819278073744e492 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/5612d87b735c4590819278073744e492 2024-12-11T04:28:37,063 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/5612d87b735c4590819278073744e492, entries=200, sequenceid=327, filesize=14.4 K 2024-12-11T04:28:37,063 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/7f1304bb30a34364b4ab1420998a6272 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/7f1304bb30a34364b4ab1420998a6272 2024-12-11T04:28:37,067 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/7f1304bb30a34364b4ab1420998a6272, entries=150, sequenceid=327, filesize=12.0 K 2024-12-11T04:28:37,067 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/32ccfed076c24929ae68292f851e7475 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/32ccfed076c24929ae68292f851e7475 2024-12-11T04:28:37,071 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/32ccfed076c24929ae68292f851e7475, entries=150, sequenceid=327, filesize=12.0 K 2024-12-11T04:28:37,071 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for c171d7ccfa412c571490e92799f7df2c in 845ms, sequenceid=327, compaction requested=true 2024-12-11T04:28:37,071 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:37,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c171d7ccfa412c571490e92799f7df2c:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:28:37,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:37,072 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:28:37,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c171d7ccfa412c571490e92799f7df2c:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:28:37,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:37,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c171d7ccfa412c571490e92799f7df2c:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:28:37,072 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:28:37,072 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:28:37,073 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40025 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:28:37,073 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): c171d7ccfa412c571490e92799f7df2c/A is initiating minor compaction (all files) 2024-12-11T04:28:37,073 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:28:37,073 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): c171d7ccfa412c571490e92799f7df2c/B is initiating minor compaction (all files) 2024-12-11T04:28:37,073 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c171d7ccfa412c571490e92799f7df2c/A in TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:37,073 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c171d7ccfa412c571490e92799f7df2c/B in TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:37,073 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/cb5652319b094c039ee17babb5ee8df0, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/cf07d7463f3d4eb0a617ba37bc3bf2dc, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/5612d87b735c4590819278073744e492] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp, totalSize=39.1 K 2024-12-11T04:28:37,073 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/1b45ce39dfd946c38011dd8c4be14934, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/4e016c705d96478399ec98dbe0ba9e64, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/7f1304bb30a34364b4ab1420998a6272] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp, totalSize=36.7 K 2024-12-11T04:28:37,073 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting cb5652319b094c039ee17babb5ee8df0, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1733891314436 2024-12-11T04:28:37,073 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 1b45ce39dfd946c38011dd8c4be14934, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1733891314436 2024-12-11T04:28:37,074 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 4e016c705d96478399ec98dbe0ba9e64, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=313, earliestPutTs=1733891314469 2024-12-11T04:28:37,074 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting cf07d7463f3d4eb0a617ba37bc3bf2dc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=313, earliestPutTs=1733891314469 2024-12-11T04:28:37,074 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 7f1304bb30a34364b4ab1420998a6272, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1733891315101 2024-12-11T04:28:37,074 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5612d87b735c4590819278073744e492, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1733891315097 2024-12-11T04:28:37,081 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c171d7ccfa412c571490e92799f7df2c#B#compaction#468 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:37,081 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/e80c42f25227404ba73d35f19ad5a34e is 50, key is test_row_0/B:col10/1733891315101/Put/seqid=0 2024-12-11T04:28:37,084 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c171d7ccfa412c571490e92799f7df2c#A#compaction#469 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:37,085 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/815e142daeed43b289f10284e24261cc is 50, key is test_row_0/A:col10/1733891315101/Put/seqid=0 2024-12-11T04:28:37,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742372_1548 (size=13085) 2024-12-11T04:28:37,091 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/e80c42f25227404ba73d35f19ad5a34e as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/e80c42f25227404ba73d35f19ad5a34e 2024-12-11T04:28:37,095 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c171d7ccfa412c571490e92799f7df2c/B of c171d7ccfa412c571490e92799f7df2c into e80c42f25227404ba73d35f19ad5a34e(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:28:37,095 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:37,095 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c., storeName=c171d7ccfa412c571490e92799f7df2c/B, priority=13, startTime=1733891317072; duration=0sec 2024-12-11T04:28:37,095 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:28:37,095 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c171d7ccfa412c571490e92799f7df2c:B 2024-12-11T04:28:37,095 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:28:37,096 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:28:37,096 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): c171d7ccfa412c571490e92799f7df2c/C is initiating minor compaction (all files) 2024-12-11T04:28:37,096 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c171d7ccfa412c571490e92799f7df2c/C in TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 
2024-12-11T04:28:37,096 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/ec064d0c2e3c420a97f2287fcca69fe0, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/772e6cc729a64ae59d11803badfc1fe9, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/32ccfed076c24929ae68292f851e7475] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp, totalSize=36.7 K 2024-12-11T04:28:37,097 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting ec064d0c2e3c420a97f2287fcca69fe0, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1733891314436 2024-12-11T04:28:37,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742373_1549 (size=13085) 2024-12-11T04:28:37,098 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 772e6cc729a64ae59d11803badfc1fe9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=313, earliestPutTs=1733891314469 2024-12-11T04:28:37,098 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 32ccfed076c24929ae68292f851e7475, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1733891315101 2024-12-11T04:28:37,104 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/815e142daeed43b289f10284e24261cc as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/815e142daeed43b289f10284e24261cc 2024-12-11T04:28:37,108 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c171d7ccfa412c571490e92799f7df2c/A of c171d7ccfa412c571490e92799f7df2c into 815e142daeed43b289f10284e24261cc(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T04:28:37,108 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:37,108 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c., storeName=c171d7ccfa412c571490e92799f7df2c/A, priority=13, startTime=1733891317072; duration=0sec 2024-12-11T04:28:37,108 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:37,108 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c171d7ccfa412c571490e92799f7df2c:A 2024-12-11T04:28:37,113 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c171d7ccfa412c571490e92799f7df2c#C#compaction#470 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:37,113 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/c8c9078feb4b49adad7cded71a3eda46 is 50, key is test_row_0/C:col10/1733891315101/Put/seqid=0 2024-12-11T04:28:37,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-12-11T04:28:37,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742374_1550 (size=13085) 2024-12-11T04:28:37,134 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/c8c9078feb4b49adad7cded71a3eda46 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/c8c9078feb4b49adad7cded71a3eda46 2024-12-11T04:28:37,135 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:37,135 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=155 2024-12-11T04:28:37,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 
2024-12-11T04:28:37,135 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(2837): Flushing c171d7ccfa412c571490e92799f7df2c 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-11T04:28:37,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=A 2024-12-11T04:28:37,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:37,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=B 2024-12-11T04:28:37,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:37,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=C 2024-12-11T04:28:37,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:37,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/b19933c4eabe485f9425844a14d171fa is 50, key is test_row_0/A:col10/1733891316252/Put/seqid=0 2024-12-11T04:28:37,144 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c171d7ccfa412c571490e92799f7df2c/C of c171d7ccfa412c571490e92799f7df2c into c8c9078feb4b49adad7cded71a3eda46(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T04:28:37,144 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:37,144 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c., storeName=c171d7ccfa412c571490e92799f7df2c/C, priority=13, startTime=1733891317072; duration=0sec 2024-12-11T04:28:37,144 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:37,144 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c171d7ccfa412c571490e92799f7df2c:C 2024-12-11T04:28:37,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742375_1551 (size=12301) 2024-12-11T04:28:37,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on c171d7ccfa412c571490e92799f7df2c 2024-12-11T04:28:37,378 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:37,387 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:37,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891377382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:37,388 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:37,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891377383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:37,388 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:37,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891377384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:37,388 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:37,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891377385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:37,392 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:37,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891377387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:37,491 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:37,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891377488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:37,492 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:37,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891377488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:37,492 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:37,492 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:37,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891377489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:37,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891377489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:37,496 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:37,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891377493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:37,547 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=354 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/b19933c4eabe485f9425844a14d171fa 2024-12-11T04:28:37,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/d6a793c1196846e282a364bc5fe1162b is 50, key is test_row_0/B:col10/1733891316252/Put/seqid=0 2024-12-11T04:28:37,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742376_1552 (size=12301) 2024-12-11T04:28:37,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-12-11T04:28:37,696 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:37,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891377693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:37,697 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:37,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891377693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:37,697 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:37,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891377693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:37,697 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:37,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891377694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:37,700 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:37,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891377697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:37,959 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=354 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/d6a793c1196846e282a364bc5fe1162b 2024-12-11T04:28:37,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/6a87a403692448129d937e3f1e3b06f8 is 50, key is test_row_0/C:col10/1733891316252/Put/seqid=0 2024-12-11T04:28:37,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742377_1553 (size=12301) 2024-12-11T04:28:38,000 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:38,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891377998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:38,000 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:38,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891377998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:38,003 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:38,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891378000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:38,003 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:38,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891378000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:38,006 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:38,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891378002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:38,369 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=354 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/6a87a403692448129d937e3f1e3b06f8 2024-12-11T04:28:38,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/b19933c4eabe485f9425844a14d171fa as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/b19933c4eabe485f9425844a14d171fa 2024-12-11T04:28:38,376 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/b19933c4eabe485f9425844a14d171fa, entries=150, sequenceid=354, filesize=12.0 K 2024-12-11T04:28:38,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/d6a793c1196846e282a364bc5fe1162b as 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/d6a793c1196846e282a364bc5fe1162b 2024-12-11T04:28:38,380 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/d6a793c1196846e282a364bc5fe1162b, entries=150, sequenceid=354, filesize=12.0 K 2024-12-11T04:28:38,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/6a87a403692448129d937e3f1e3b06f8 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/6a87a403692448129d937e3f1e3b06f8 2024-12-11T04:28:38,384 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/6a87a403692448129d937e3f1e3b06f8, entries=150, sequenceid=354, filesize=12.0 K 2024-12-11T04:28:38,384 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for c171d7ccfa412c571490e92799f7df2c in 1249ms, sequenceid=354, compaction requested=false 2024-12-11T04:28:38,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.HRegion(2538): Flush status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:38,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 
2024-12-11T04:28:38,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=155}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=155 2024-12-11T04:28:38,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=155 2024-12-11T04:28:38,387 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=155, resume processing ppid=154 2024-12-11T04:28:38,387 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, ppid=154, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8610 sec 2024-12-11T04:28:38,388 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=154, table=TestAcidGuarantees in 1.8640 sec 2024-12-11T04:28:38,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on c171d7ccfa412c571490e92799f7df2c 2024-12-11T04:28:38,504 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c171d7ccfa412c571490e92799f7df2c 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-11T04:28:38,504 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=A 2024-12-11T04:28:38,504 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:38,504 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=B 2024-12-11T04:28:38,504 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:38,504 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=C 2024-12-11T04:28:38,504 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:38,509 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/8d80b56b0684485c80dfe77e42021862 is 50, key is test_row_0/A:col10/1733891317384/Put/seqid=0 2024-12-11T04:28:38,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742378_1554 (size=14741) 2024-12-11T04:28:38,513 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=367 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/8d80b56b0684485c80dfe77e42021862 2024-12-11T04:28:38,523 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/5dee19a1c2fd47bdbf0b4ed83f7c4f98 is 50, key is test_row_0/B:col10/1733891317384/Put/seqid=0 2024-12-11T04:28:38,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to 
blk_1073742379_1555 (size=12301) 2024-12-11T04:28:38,534 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=367 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/5dee19a1c2fd47bdbf0b4ed83f7c4f98 2024-12-11T04:28:38,538 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:38,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891378531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:38,539 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:38,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891378533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:38,540 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/5ddc236dd9604490bc4f68b799aa953d is 50, key is test_row_0/C:col10/1733891317384/Put/seqid=0 2024-12-11T04:28:38,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742380_1556 (size=12301) 2024-12-11T04:28:38,544 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:38,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891378538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:38,544 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:38,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891378539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:38,544 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:38,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891378540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:38,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-12-11T04:28:38,628 INFO [Thread-2131 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 154 completed 2024-12-11T04:28:38,629 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:28:38,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=156, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=156, table=TestAcidGuarantees 2024-12-11T04:28:38,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-12-11T04:28:38,631 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=156, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=156, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:28:38,631 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=156, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=156, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:28:38,631 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=157, ppid=156, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:28:38,644 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:38,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891378639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:38,644 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:38,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891378640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:38,649 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:38,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891378645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:38,650 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:38,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891378645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:38,651 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:38,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891378645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:38,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-12-11T04:28:38,783 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:38,783 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=157 2024-12-11T04:28:38,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:38,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:38,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:38,783 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] handler.RSProcedureHandler(58): pid=157 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:28:38,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=157 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:38,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=157 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:38,849 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:38,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891378845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:38,850 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:38,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891378846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:38,854 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:38,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891378850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:38,856 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:38,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891378851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:38,856 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:38,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891378852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:38,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-12-11T04:28:38,935 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:38,936 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=157 2024-12-11T04:28:38,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:38,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:38,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:38,936 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] handler.RSProcedureHandler(58): pid=157 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:28:38,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=157 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:38,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=157 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:38,944 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=367 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/5ddc236dd9604490bc4f68b799aa953d 2024-12-11T04:28:38,948 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/8d80b56b0684485c80dfe77e42021862 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/8d80b56b0684485c80dfe77e42021862 2024-12-11T04:28:38,952 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/8d80b56b0684485c80dfe77e42021862, entries=200, sequenceid=367, filesize=14.4 K 2024-12-11T04:28:38,953 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/5dee19a1c2fd47bdbf0b4ed83f7c4f98 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/5dee19a1c2fd47bdbf0b4ed83f7c4f98 2024-12-11T04:28:38,956 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/5dee19a1c2fd47bdbf0b4ed83f7c4f98, entries=150, sequenceid=367, filesize=12.0 K 2024-12-11T04:28:38,957 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/5ddc236dd9604490bc4f68b799aa953d as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/5ddc236dd9604490bc4f68b799aa953d 2024-12-11T04:28:38,961 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/5ddc236dd9604490bc4f68b799aa953d, entries=150, sequenceid=367, filesize=12.0 K 2024-12-11T04:28:38,962 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for c171d7ccfa412c571490e92799f7df2c in 458ms, sequenceid=367, compaction requested=true 2024-12-11T04:28:38,962 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:38,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
c171d7ccfa412c571490e92799f7df2c:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:28:38,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:38,963 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:28:38,963 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:28:38,963 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:28:38,964 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): c171d7ccfa412c571490e92799f7df2c/B is initiating minor compaction (all files) 2024-12-11T04:28:38,964 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c171d7ccfa412c571490e92799f7df2c/B in TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:38,964 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/e80c42f25227404ba73d35f19ad5a34e, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/d6a793c1196846e282a364bc5fe1162b, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/5dee19a1c2fd47bdbf0b4ed83f7c4f98] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp, totalSize=36.8 K 2024-12-11T04:28:38,964 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40127 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:28:38,964 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): c171d7ccfa412c571490e92799f7df2c/A is initiating minor compaction (all files) 2024-12-11T04:28:38,964 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c171d7ccfa412c571490e92799f7df2c/A in TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 
2024-12-11T04:28:38,964 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/815e142daeed43b289f10284e24261cc, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/b19933c4eabe485f9425844a14d171fa, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/8d80b56b0684485c80dfe77e42021862] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp, totalSize=39.2 K 2024-12-11T04:28:38,965 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting e80c42f25227404ba73d35f19ad5a34e, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1733891315101 2024-12-11T04:28:38,965 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 815e142daeed43b289f10284e24261cc, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1733891315101 2024-12-11T04:28:38,965 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting d6a793c1196846e282a364bc5fe1162b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=354, earliestPutTs=1733891316252 2024-12-11T04:28:38,965 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting b19933c4eabe485f9425844a14d171fa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=354, earliestPutTs=1733891316252 2024-12-11T04:28:38,965 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 5dee19a1c2fd47bdbf0b4ed83f7c4f98, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=367, earliestPutTs=1733891317384 2024-12-11T04:28:38,966 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c171d7ccfa412c571490e92799f7df2c:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:28:38,966 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8d80b56b0684485c80dfe77e42021862, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=367, earliestPutTs=1733891317383 2024-12-11T04:28:38,969 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:38,971 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c171d7ccfa412c571490e92799f7df2c:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:28:38,972 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:28:38,982 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c171d7ccfa412c571490e92799f7df2c#B#compaction#477 average throughput is 3.28 MB/second, slept 0 time(s) and total 
slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:38,982 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/2e49d08532d848ba966e380621a7d094 is 50, key is test_row_0/B:col10/1733891317384/Put/seqid=0 2024-12-11T04:28:38,985 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c171d7ccfa412c571490e92799f7df2c#A#compaction#478 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:38,986 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/7b83620fcd224e2eb2b047e8604282a8 is 50, key is test_row_0/A:col10/1733891317384/Put/seqid=0 2024-12-11T04:28:39,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742382_1558 (size=13187) 2024-12-11T04:28:39,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742381_1557 (size=13187) 2024-12-11T04:28:39,027 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/2e49d08532d848ba966e380621a7d094 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/2e49d08532d848ba966e380621a7d094 2024-12-11T04:28:39,032 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c171d7ccfa412c571490e92799f7df2c/B of c171d7ccfa412c571490e92799f7df2c into 2e49d08532d848ba966e380621a7d094(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T04:28:39,032 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:39,032 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c., storeName=c171d7ccfa412c571490e92799f7df2c/B, priority=13, startTime=1733891318963; duration=0sec 2024-12-11T04:28:39,032 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:28:39,032 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c171d7ccfa412c571490e92799f7df2c:B 2024-12-11T04:28:39,032 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:28:39,033 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:28:39,033 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): c171d7ccfa412c571490e92799f7df2c/C is initiating minor compaction (all files) 2024-12-11T04:28:39,033 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c171d7ccfa412c571490e92799f7df2c/C in TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:39,034 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/c8c9078feb4b49adad7cded71a3eda46, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/6a87a403692448129d937e3f1e3b06f8, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/5ddc236dd9604490bc4f68b799aa953d] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp, totalSize=36.8 K 2024-12-11T04:28:39,034 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting c8c9078feb4b49adad7cded71a3eda46, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1733891315101 2024-12-11T04:28:39,034 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 6a87a403692448129d937e3f1e3b06f8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=354, earliestPutTs=1733891316252 2024-12-11T04:28:39,034 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 5ddc236dd9604490bc4f68b799aa953d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=367, earliestPutTs=1733891317384 2024-12-11T04:28:39,042 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
c171d7ccfa412c571490e92799f7df2c#C#compaction#479 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:39,043 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/63e60deecf1a42efb9f14b1a40201992 is 50, key is test_row_0/C:col10/1733891317384/Put/seqid=0 2024-12-11T04:28:39,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742383_1559 (size=13187) 2024-12-11T04:28:39,088 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:39,088 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=157 2024-12-11T04:28:39,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:39,089 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2837): Flushing c171d7ccfa412c571490e92799f7df2c 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-11T04:28:39,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=A 2024-12-11T04:28:39,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:39,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=B 2024-12-11T04:28:39,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:39,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=C 2024-12-11T04:28:39,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:39,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/fff4b7cd099b469fbbad89ddf9711a39 is 50, key is test_row_0/A:col10/1733891318537/Put/seqid=0 2024-12-11T04:28:39,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742384_1560 (size=12301) 2024-12-11T04:28:39,098 INFO 
[RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=393 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/fff4b7cd099b469fbbad89ddf9711a39 2024-12-11T04:28:39,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/e58b4744126c471b98146ab46c17be91 is 50, key is test_row_0/B:col10/1733891318537/Put/seqid=0 2024-12-11T04:28:39,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742385_1561 (size=12301) 2024-12-11T04:28:39,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on c171d7ccfa412c571490e92799f7df2c 2024-12-11T04:28:39,155 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:39,168 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:39,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891379161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:39,168 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:39,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891379162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:39,168 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:39,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891379163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:39,168 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:39,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891379163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:39,170 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:39,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891379167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:39,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-12-11T04:28:39,272 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:39,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891379269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:39,272 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:39,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891379269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:39,272 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:39,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891379269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:39,272 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:39,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891379269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:39,276 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:39,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891379270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:39,419 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/7b83620fcd224e2eb2b047e8604282a8 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/7b83620fcd224e2eb2b047e8604282a8 2024-12-11T04:28:39,424 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c171d7ccfa412c571490e92799f7df2c/A of c171d7ccfa412c571490e92799f7df2c into 7b83620fcd224e2eb2b047e8604282a8(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T04:28:39,424 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:39,424 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c., storeName=c171d7ccfa412c571490e92799f7df2c/A, priority=13, startTime=1733891318962; duration=0sec 2024-12-11T04:28:39,425 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:39,425 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c171d7ccfa412c571490e92799f7df2c:A 2024-12-11T04:28:39,452 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/63e60deecf1a42efb9f14b1a40201992 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/63e60deecf1a42efb9f14b1a40201992 2024-12-11T04:28:39,455 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c171d7ccfa412c571490e92799f7df2c/C of c171d7ccfa412c571490e92799f7df2c into 63e60deecf1a42efb9f14b1a40201992(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:28:39,456 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:39,456 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c., storeName=c171d7ccfa412c571490e92799f7df2c/C, priority=13, startTime=1733891318969; duration=0sec 2024-12-11T04:28:39,456 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:39,456 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c171d7ccfa412c571490e92799f7df2c:C 2024-12-11T04:28:39,478 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:39,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891379473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:39,478 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:39,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891379473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:39,478 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:39,478 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:39,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891379474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:39,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891379473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:39,478 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:39,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891379477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:39,509 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=393 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/e58b4744126c471b98146ab46c17be91 2024-12-11T04:28:39,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/e87fa2da298e48a19935393265f4d12d is 50, key is test_row_0/C:col10/1733891318537/Put/seqid=0 2024-12-11T04:28:39,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742386_1562 (size=12301) 2024-12-11T04:28:39,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-12-11T04:28:39,781 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:39,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891379779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:39,782 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:39,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891379779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:39,783 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:39,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891379780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:39,783 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:39,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891379780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:39,783 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:39,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891379781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:39,920 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=393 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/e87fa2da298e48a19935393265f4d12d 2024-12-11T04:28:39,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/fff4b7cd099b469fbbad89ddf9711a39 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/fff4b7cd099b469fbbad89ddf9711a39 2024-12-11T04:28:39,931 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/fff4b7cd099b469fbbad89ddf9711a39, entries=150, sequenceid=393, filesize=12.0 K 2024-12-11T04:28:39,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/e58b4744126c471b98146ab46c17be91 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/e58b4744126c471b98146ab46c17be91 2024-12-11T04:28:39,934 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/e58b4744126c471b98146ab46c17be91, entries=150, sequenceid=393, filesize=12.0 K 2024-12-11T04:28:39,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/e87fa2da298e48a19935393265f4d12d as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/e87fa2da298e48a19935393265f4d12d 2024-12-11T04:28:39,938 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/e87fa2da298e48a19935393265f4d12d, entries=150, sequenceid=393, filesize=12.0 K 2024-12-11T04:28:39,939 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for c171d7ccfa412c571490e92799f7df2c in 850ms, sequenceid=393, compaction requested=false 2024-12-11T04:28:39,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.HRegion(2538): Flush status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:39,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:39,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=157}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=157 2024-12-11T04:28:39,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=157 2024-12-11T04:28:39,941 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=157, resume processing ppid=156 2024-12-11T04:28:39,941 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, ppid=156, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3090 sec 2024-12-11T04:28:39,942 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=156, table=TestAcidGuarantees in 1.3120 sec 2024-12-11T04:28:40,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on c171d7ccfa412c571490e92799f7df2c 2024-12-11T04:28:40,287 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c171d7ccfa412c571490e92799f7df2c 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-11T04:28:40,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=A 2024-12-11T04:28:40,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:40,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=B 2024-12-11T04:28:40,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:40,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
c171d7ccfa412c571490e92799f7df2c, store=C 2024-12-11T04:28:40,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:40,291 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/4a3c72c988b749a784d244183e3253a9 is 50, key is test_row_0/A:col10/1733891319162/Put/seqid=0 2024-12-11T04:28:40,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742387_1563 (size=14741) 2024-12-11T04:28:40,319 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:40,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891380312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:40,319 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:40,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891380313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:40,320 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:40,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891380314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:40,320 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:40,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891380316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:40,321 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:40,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891380319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:40,421 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:40,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891380420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:40,422 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:40,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891380420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:40,427 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:40,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891380421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:40,427 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:40,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891380421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:40,427 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:40,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891380422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:40,625 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:40,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891380623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:40,625 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:40,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891380623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:40,633 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:40,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891380628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:40,633 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:40,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891380629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:40,633 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:40,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891380629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:40,695 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=407 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/4a3c72c988b749a784d244183e3253a9 2024-12-11T04:28:40,702 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/68f96124229947c1abfb220ba32c4c2e is 50, key is test_row_0/B:col10/1733891319162/Put/seqid=0 2024-12-11T04:28:40,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742388_1564 (size=12301) 2024-12-11T04:28:40,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=156 2024-12-11T04:28:40,734 INFO [Thread-2131 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 156 completed 2024-12-11T04:28:40,735 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:28:40,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=158, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=158, table=TestAcidGuarantees 2024-12-11T04:28:40,737 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=158, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=158, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:28:40,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-12-11T04:28:40,737 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=158, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=158, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:28:40,737 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=159, ppid=158, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
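
The repeated RegionTooBusyException entries in this part of the log come from HRegion.checkResources rejecting Mutate calls once the region's memstore passes its blocking limit (reported here as 512.0 K), while the FlushTableProcedure/HBaseAdmin entries correspond to the test driver asking for a table flush. A minimal client-side sketch of the write pattern those rejected calls imply, assuming the table, row, family, and qualifier names shown in the log; the class name, retry loop, and backoff values are illustrative only, and the stock HBase client normally retries this exception on its own:

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutSketch {

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row/family/qualifier copied from the log entries above.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      putWithRetry(table, put, 5);
    }
  }

  // Retry the put a few times when the region reports it is over its memstore limit;
  // the region server keeps rejecting mutations until the in-flight flush frees space.
  static void putWithRetry(Table table, Put put, int maxAttempts)
      throws IOException, InterruptedException {
    long backoffMs = 100;
    for (int attempt = 1; ; attempt++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException e) {
        if (attempt >= maxAttempts) {
          throw e;
        }
        Thread.sleep(backoffMs); // give MemStoreFlusher time to finish
        backoffMs *= 2;          // simple exponential backoff
      }
    }
  }
}
```
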
2024-12-11T04:28:40,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-12-11T04:28:40,889 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:40,889 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=159 2024-12-11T04:28:40,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:40,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:40,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:40,890 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=159}] handler.RSProcedureHandler(58): pid=159 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:40,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=159 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:40,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=159 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:40,929 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:40,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891380927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:40,931 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:40,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891380928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:40,937 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:40,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891380935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:40,937 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:40,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891380935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:40,938 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:40,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891380936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:41,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-12-11T04:28:41,042 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:41,042 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=159 2024-12-11T04:28:41,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:41,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:41,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:41,043 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=159}] handler.RSProcedureHandler(58): pid=159 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:41,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=159 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:41,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=159 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:41,106 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=407 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/68f96124229947c1abfb220ba32c4c2e 2024-12-11T04:28:41,112 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/75d279b3ddae44f3a70b0482e74001a3 is 50, key is test_row_0/C:col10/1733891319162/Put/seqid=0 2024-12-11T04:28:41,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742389_1565 (size=12301) 2024-12-11T04:28:41,194 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:41,195 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=159 2024-12-11T04:28:41,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:41,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 
as already flushing 2024-12-11T04:28:41,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:41,195 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] handler.RSProcedureHandler(58): pid=159 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:41,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=159 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:41,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=159 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:41,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-12-11T04:28:41,347 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:41,347 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=159 2024-12-11T04:28:41,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:41,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:41,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:41,348 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=159}] handler.RSProcedureHandler(58): pid=159 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:41,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=159 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:41,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=159 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:41,432 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:41,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891381430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:41,436 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:41,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891381433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:41,441 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:41,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891381439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:41,442 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:41,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891381440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:41,442 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:41,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891381440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:41,499 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:41,500 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=159 2024-12-11T04:28:41,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:41,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:41,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:41,500 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=159}] handler.RSProcedureHandler(58): pid=159 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:41,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=159 java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:41,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=159 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:41,516 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=407 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/75d279b3ddae44f3a70b0482e74001a3 2024-12-11T04:28:41,519 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/4a3c72c988b749a784d244183e3253a9 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/4a3c72c988b749a784d244183e3253a9 2024-12-11T04:28:41,522 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/4a3c72c988b749a784d244183e3253a9, entries=200, sequenceid=407, filesize=14.4 K 2024-12-11T04:28:41,523 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/68f96124229947c1abfb220ba32c4c2e as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/68f96124229947c1abfb220ba32c4c2e 2024-12-11T04:28:41,525 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/68f96124229947c1abfb220ba32c4c2e, entries=150, 
sequenceid=407, filesize=12.0 K 2024-12-11T04:28:41,526 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/75d279b3ddae44f3a70b0482e74001a3 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/75d279b3ddae44f3a70b0482e74001a3 2024-12-11T04:28:41,529 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/75d279b3ddae44f3a70b0482e74001a3, entries=150, sequenceid=407, filesize=12.0 K 2024-12-11T04:28:41,529 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for c171d7ccfa412c571490e92799f7df2c in 1242ms, sequenceid=407, compaction requested=true 2024-12-11T04:28:41,529 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:41,529 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:28:41,530 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c171d7ccfa412c571490e92799f7df2c:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:28:41,530 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:41,530 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:28:41,530 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40229 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:28:41,530 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): c171d7ccfa412c571490e92799f7df2c/A is initiating minor compaction (all files) 2024-12-11T04:28:41,530 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c171d7ccfa412c571490e92799f7df2c/A in TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 
2024-12-11T04:28:41,531 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/7b83620fcd224e2eb2b047e8604282a8, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/fff4b7cd099b469fbbad89ddf9711a39, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/4a3c72c988b749a784d244183e3253a9] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp, totalSize=39.3 K 2024-12-11T04:28:41,531 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7b83620fcd224e2eb2b047e8604282a8, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=367, earliestPutTs=1733891317384 2024-12-11T04:28:41,531 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c171d7ccfa412c571490e92799f7df2c:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:28:41,531 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:41,531 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c171d7ccfa412c571490e92799f7df2c:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:28:41,531 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:28:41,531 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting fff4b7cd099b469fbbad89ddf9711a39, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=393, earliestPutTs=1733891318529 2024-12-11T04:28:41,532 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:28:41,532 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): c171d7ccfa412c571490e92799f7df2c/B is initiating minor compaction (all files) 2024-12-11T04:28:41,532 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c171d7ccfa412c571490e92799f7df2c/B in TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 
2024-12-11T04:28:41,532 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/2e49d08532d848ba966e380621a7d094, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/e58b4744126c471b98146ab46c17be91, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/68f96124229947c1abfb220ba32c4c2e] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp, totalSize=36.9 K 2024-12-11T04:28:41,532 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4a3c72c988b749a784d244183e3253a9, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=407, earliestPutTs=1733891319155 2024-12-11T04:28:41,532 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e49d08532d848ba966e380621a7d094, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=367, earliestPutTs=1733891317384 2024-12-11T04:28:41,532 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting e58b4744126c471b98146ab46c17be91, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=393, earliestPutTs=1733891318529 2024-12-11T04:28:41,533 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 68f96124229947c1abfb220ba32c4c2e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=407, earliestPutTs=1733891319162 2024-12-11T04:28:41,539 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c171d7ccfa412c571490e92799f7df2c#A#compaction#486 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:41,540 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/00e9d8eb5e1e4f2891617adf77e695ae is 50, key is test_row_0/A:col10/1733891319162/Put/seqid=0 2024-12-11T04:28:41,542 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c171d7ccfa412c571490e92799f7df2c#B#compaction#487 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:41,542 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/8963d413ceff413c9d34a04b9fb01749 is 50, key is test_row_0/B:col10/1733891319162/Put/seqid=0 2024-12-11T04:28:41,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742391_1567 (size=13289) 2024-12-11T04:28:41,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742390_1566 (size=13289) 2024-12-11T04:28:41,652 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:41,652 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=159 2024-12-11T04:28:41,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:41,653 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.HRegion(2837): Flushing c171d7ccfa412c571490e92799f7df2c 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-11T04:28:41,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=A 2024-12-11T04:28:41,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:41,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=B 2024-12-11T04:28:41,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:41,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=C 2024-12-11T04:28:41,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:41,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/857560d0019d44f7b9bfb18a89c591b8 is 50, key is test_row_0/A:col10/1733891320318/Put/seqid=0 2024-12-11T04:28:41,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742392_1568 
(size=12301) 2024-12-11T04:28:41,660 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=431 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/857560d0019d44f7b9bfb18a89c591b8 2024-12-11T04:28:41,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/9e76ddecd5c74f8f9dc9eadaa6ed8e9e is 50, key is test_row_0/B:col10/1733891320318/Put/seqid=0 2024-12-11T04:28:41,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742393_1569 (size=12301) 2024-12-11T04:28:41,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-12-11T04:28:41,966 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/8963d413ceff413c9d34a04b9fb01749 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/8963d413ceff413c9d34a04b9fb01749 2024-12-11T04:28:41,966 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/00e9d8eb5e1e4f2891617adf77e695ae as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/00e9d8eb5e1e4f2891617adf77e695ae 2024-12-11T04:28:41,971 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c171d7ccfa412c571490e92799f7df2c/B of c171d7ccfa412c571490e92799f7df2c into 8963d413ceff413c9d34a04b9fb01749(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:28:41,971 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c171d7ccfa412c571490e92799f7df2c/A of c171d7ccfa412c571490e92799f7df2c into 00e9d8eb5e1e4f2891617adf77e695ae(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T04:28:41,971 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:41,971 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:41,971 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c., storeName=c171d7ccfa412c571490e92799f7df2c/B, priority=13, startTime=1733891321530; duration=0sec 2024-12-11T04:28:41,971 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c., storeName=c171d7ccfa412c571490e92799f7df2c/A, priority=13, startTime=1733891321529; duration=0sec 2024-12-11T04:28:41,971 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:28:41,971 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c171d7ccfa412c571490e92799f7df2c:B 2024-12-11T04:28:41,971 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:41,971 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c171d7ccfa412c571490e92799f7df2c:A 2024-12-11T04:28:41,971 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:28:41,973 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:28:41,973 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): c171d7ccfa412c571490e92799f7df2c/C is initiating minor compaction (all files) 2024-12-11T04:28:41,973 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c171d7ccfa412c571490e92799f7df2c/C in TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 
2024-12-11T04:28:41,973 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/63e60deecf1a42efb9f14b1a40201992, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/e87fa2da298e48a19935393265f4d12d, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/75d279b3ddae44f3a70b0482e74001a3] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp, totalSize=36.9 K 2024-12-11T04:28:41,973 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 63e60deecf1a42efb9f14b1a40201992, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=367, earliestPutTs=1733891317384 2024-12-11T04:28:41,974 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting e87fa2da298e48a19935393265f4d12d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=393, earliestPutTs=1733891318529 2024-12-11T04:28:41,974 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 75d279b3ddae44f3a70b0482e74001a3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=407, earliestPutTs=1733891319162 2024-12-11T04:28:41,980 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c171d7ccfa412c571490e92799f7df2c#C#compaction#490 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:41,981 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/de882b740a8a471fbaeda3cf919a3972 is 50, key is test_row_0/C:col10/1733891319162/Put/seqid=0 2024-12-11T04:28:41,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742394_1570 (size=13289) 2024-12-11T04:28:41,988 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/de882b740a8a471fbaeda3cf919a3972 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/de882b740a8a471fbaeda3cf919a3972 2024-12-11T04:28:41,991 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c171d7ccfa412c571490e92799f7df2c/C of c171d7ccfa412c571490e92799f7df2c into de882b740a8a471fbaeda3cf919a3972(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T04:28:41,991 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:41,991 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c., storeName=c171d7ccfa412c571490e92799f7df2c/C, priority=13, startTime=1733891321531; duration=0sec 2024-12-11T04:28:41,991 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:41,991 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c171d7ccfa412c571490e92799f7df2c:C 2024-12-11T04:28:42,070 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=431 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/9e76ddecd5c74f8f9dc9eadaa6ed8e9e 2024-12-11T04:28:42,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/5e81c96c9313468b8e88e94d922f9d90 is 50, key is test_row_0/C:col10/1733891320318/Put/seqid=0 2024-12-11T04:28:42,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742395_1571 (size=12301) 2024-12-11T04:28:42,438 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. as already flushing 2024-12-11T04:28:42,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on c171d7ccfa412c571490e92799f7df2c 2024-12-11T04:28:42,453 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:42,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49806 deadline: 1733891382450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:42,454 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:42,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49784 deadline: 1733891382450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:42,458 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:42,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49824 deadline: 1733891382451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:42,458 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:42,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49854 deadline: 1733891382451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:42,458 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:42,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49830 deadline: 1733891382452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:42,482 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=431 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/5e81c96c9313468b8e88e94d922f9d90 2024-12-11T04:28:42,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/857560d0019d44f7b9bfb18a89c591b8 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/857560d0019d44f7b9bfb18a89c591b8 2024-12-11T04:28:42,488 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/857560d0019d44f7b9bfb18a89c591b8, entries=150, sequenceid=431, filesize=12.0 K 2024-12-11T04:28:42,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/9e76ddecd5c74f8f9dc9eadaa6ed8e9e as 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/9e76ddecd5c74f8f9dc9eadaa6ed8e9e 2024-12-11T04:28:42,490 DEBUG [Thread-2132 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6dd48863 to 127.0.0.1:50078 2024-12-11T04:28:42,490 DEBUG [Thread-2132 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:28:42,492 DEBUG [Thread-2136 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1dc5e114 to 127.0.0.1:50078 2024-12-11T04:28:42,492 DEBUG [Thread-2136 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:28:42,492 DEBUG [Thread-2140 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x17e5a47d to 127.0.0.1:50078 2024-12-11T04:28:42,492 DEBUG [Thread-2140 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:28:42,493 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/9e76ddecd5c74f8f9dc9eadaa6ed8e9e, entries=150, sequenceid=431, filesize=12.0 K 2024-12-11T04:28:42,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/5e81c96c9313468b8e88e94d922f9d90 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/5e81c96c9313468b8e88e94d922f9d90 2024-12-11T04:28:42,496 DEBUG [Thread-2138 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3e96b8ad to 127.0.0.1:50078 2024-12-11T04:28:42,496 DEBUG [Thread-2138 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:28:42,496 DEBUG [Thread-2134 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x51196534 to 127.0.0.1:50078 2024-12-11T04:28:42,496 DEBUG [Thread-2134 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:28:42,497 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/5e81c96c9313468b8e88e94d922f9d90, entries=150, sequenceid=431, filesize=12.0 K 2024-12-11T04:28:42,497 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for c171d7ccfa412c571490e92799f7df2c in 845ms, sequenceid=431, compaction requested=false 2024-12-11T04:28:42,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.HRegion(2538): Flush status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:42,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 
2024-12-11T04:28:42,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=159}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=159 2024-12-11T04:28:42,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=159 2024-12-11T04:28:42,500 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=159, resume processing ppid=158 2024-12-11T04:28:42,500 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, ppid=158, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7610 sec 2024-12-11T04:28:42,501 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=158, table=TestAcidGuarantees in 1.7650 sec 2024-12-11T04:28:42,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on c171d7ccfa412c571490e92799f7df2c 2024-12-11T04:28:42,556 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing c171d7ccfa412c571490e92799f7df2c 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-11T04:28:42,556 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=A 2024-12-11T04:28:42,556 DEBUG [Thread-2125 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1b7f20c4 to 127.0.0.1:50078 2024-12-11T04:28:42,556 DEBUG [Thread-2125 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:28:42,556 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:42,556 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=B 2024-12-11T04:28:42,556 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:42,556 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=C 2024-12-11T04:28:42,556 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:42,559 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/42c2b32fc5f64eb4bfcef8caf69f8f25 is 50, key is test_row_0/A:col10/1733891322449/Put/seqid=0 2024-12-11T04:28:42,560 DEBUG [Thread-2121 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5871c039 to 127.0.0.1:50078 2024-12-11T04:28:42,560 DEBUG [Thread-2123 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7daa5922 to 127.0.0.1:50078 2024-12-11T04:28:42,560 DEBUG [Thread-2121 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:28:42,560 DEBUG [Thread-2123 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:28:42,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742396_1572 (size=12301) 2024-12-11T04:28:42,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 
2024-12-11T04:28:42,840 INFO [Thread-2131 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 158 completed 2024-12-11T04:28:42,963 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=447 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/42c2b32fc5f64eb4bfcef8caf69f8f25 2024-12-11T04:28:42,969 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/a1525925ec2b45bf9ac6a40f54151c05 is 50, key is test_row_0/B:col10/1733891322449/Put/seqid=0 2024-12-11T04:28:42,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742397_1573 (size=12301) 2024-12-11T04:28:43,372 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=447 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/a1525925ec2b45bf9ac6a40f54151c05 2024-12-11T04:28:43,377 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/f15372c6667a4343b591f88daa8778e5 is 50, key is test_row_0/C:col10/1733891322449/Put/seqid=0 2024-12-11T04:28:43,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742398_1574 (size=12301) 2024-12-11T04:28:43,780 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=447 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/f15372c6667a4343b591f88daa8778e5 2024-12-11T04:28:43,784 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/42c2b32fc5f64eb4bfcef8caf69f8f25 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/42c2b32fc5f64eb4bfcef8caf69f8f25 2024-12-11T04:28:43,786 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/42c2b32fc5f64eb4bfcef8caf69f8f25, entries=150, sequenceid=447, filesize=12.0 K 2024-12-11T04:28:43,787 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/a1525925ec2b45bf9ac6a40f54151c05 as 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/a1525925ec2b45bf9ac6a40f54151c05 2024-12-11T04:28:43,789 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/a1525925ec2b45bf9ac6a40f54151c05, entries=150, sequenceid=447, filesize=12.0 K 2024-12-11T04:28:43,790 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/f15372c6667a4343b591f88daa8778e5 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/f15372c6667a4343b591f88daa8778e5 2024-12-11T04:28:43,792 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/f15372c6667a4343b591f88daa8778e5, entries=150, sequenceid=447, filesize=12.0 K 2024-12-11T04:28:43,793 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=13.42 KB/13740 for c171d7ccfa412c571490e92799f7df2c in 1237ms, sequenceid=447, compaction requested=true 2024-12-11T04:28:43,793 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:43,793 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c171d7ccfa412c571490e92799f7df2c:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:28:43,793 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:43,793 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c171d7ccfa412c571490e92799f7df2c:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:28:43,793 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:43,793 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c171d7ccfa412c571490e92799f7df2c:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:28:43,793 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:28:43,793 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:28:43,793 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:28:43,794 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction 
algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:28:43,794 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:28:43,794 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): c171d7ccfa412c571490e92799f7df2c/B is initiating minor compaction (all files) 2024-12-11T04:28:43,794 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): c171d7ccfa412c571490e92799f7df2c/A is initiating minor compaction (all files) 2024-12-11T04:28:43,794 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c171d7ccfa412c571490e92799f7df2c/B in TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:43,794 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c171d7ccfa412c571490e92799f7df2c/A in TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:43,794 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/8963d413ceff413c9d34a04b9fb01749, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/9e76ddecd5c74f8f9dc9eadaa6ed8e9e, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/a1525925ec2b45bf9ac6a40f54151c05] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp, totalSize=37.0 K 2024-12-11T04:28:43,794 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/00e9d8eb5e1e4f2891617adf77e695ae, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/857560d0019d44f7b9bfb18a89c591b8, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/42c2b32fc5f64eb4bfcef8caf69f8f25] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp, totalSize=37.0 K 2024-12-11T04:28:43,794 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 8963d413ceff413c9d34a04b9fb01749, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=407, earliestPutTs=1733891319162 2024-12-11T04:28:43,794 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 00e9d8eb5e1e4f2891617adf77e695ae, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=407, earliestPutTs=1733891319162 2024-12-11T04:28:43,794 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 9e76ddecd5c74f8f9dc9eadaa6ed8e9e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=431, earliestPutTs=1733891320304 2024-12-11T04:28:43,794 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 857560d0019d44f7b9bfb18a89c591b8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=431, earliestPutTs=1733891320304 2024-12-11T04:28:43,795 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting a1525925ec2b45bf9ac6a40f54151c05, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=447, earliestPutTs=1733891322449 2024-12-11T04:28:43,795 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 42c2b32fc5f64eb4bfcef8caf69f8f25, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=447, earliestPutTs=1733891322449 2024-12-11T04:28:43,800 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c171d7ccfa412c571490e92799f7df2c#A#compaction#495 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:43,800 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c171d7ccfa412c571490e92799f7df2c#B#compaction#496 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:43,800 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/374394fb9215499982e553374dff06f2 is 50, key is test_row_0/A:col10/1733891322449/Put/seqid=0 2024-12-11T04:28:43,800 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/151ec97fcc4f423099dba6bd366d3ffb is 50, key is test_row_0/B:col10/1733891322449/Put/seqid=0 2024-12-11T04:28:43,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742399_1575 (size=13391) 2024-12-11T04:28:43,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742400_1576 (size=13391) 2024-12-11T04:28:44,207 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/374394fb9215499982e553374dff06f2 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/374394fb9215499982e553374dff06f2 2024-12-11T04:28:44,207 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/151ec97fcc4f423099dba6bd366d3ffb 
as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/151ec97fcc4f423099dba6bd366d3ffb 2024-12-11T04:28:44,210 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c171d7ccfa412c571490e92799f7df2c/B of c171d7ccfa412c571490e92799f7df2c into 151ec97fcc4f423099dba6bd366d3ffb(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:28:44,210 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c171d7ccfa412c571490e92799f7df2c/A of c171d7ccfa412c571490e92799f7df2c into 374394fb9215499982e553374dff06f2(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:28:44,210 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:44,210 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:44,210 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c., storeName=c171d7ccfa412c571490e92799f7df2c/B, priority=13, startTime=1733891323793; duration=0sec 2024-12-11T04:28:44,210 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c., storeName=c171d7ccfa412c571490e92799f7df2c/A, priority=13, startTime=1733891323793; duration=0sec 2024-12-11T04:28:44,211 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:28:44,211 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c171d7ccfa412c571490e92799f7df2c:A 2024-12-11T04:28:44,211 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:44,211 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c171d7ccfa412c571490e92799f7df2c:B 2024-12-11T04:28:44,211 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:28:44,211 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:28:44,211 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): c171d7ccfa412c571490e92799f7df2c/C is initiating minor compaction (all files) 2024-12-11T04:28:44,211 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of c171d7ccfa412c571490e92799f7df2c/C in 
TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:44,211 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/de882b740a8a471fbaeda3cf919a3972, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/5e81c96c9313468b8e88e94d922f9d90, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/f15372c6667a4343b591f88daa8778e5] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp, totalSize=37.0 K 2024-12-11T04:28:44,212 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting de882b740a8a471fbaeda3cf919a3972, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=407, earliestPutTs=1733891319162 2024-12-11T04:28:44,212 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5e81c96c9313468b8e88e94d922f9d90, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=431, earliestPutTs=1733891320304 2024-12-11T04:28:44,212 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting f15372c6667a4343b591f88daa8778e5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=447, earliestPutTs=1733891322449 2024-12-11T04:28:44,218 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c171d7ccfa412c571490e92799f7df2c#C#compaction#497 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:44,218 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/5233c8d17d154e2a9865242c07de76d7 is 50, key is test_row_0/C:col10/1733891322449/Put/seqid=0 2024-12-11T04:28:44,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742401_1577 (size=13391) 2024-12-11T04:28:44,475 DEBUG [Thread-2129 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x41b0e7b6 to 127.0.0.1:50078 2024-12-11T04:28:44,475 DEBUG [Thread-2129 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:28:44,476 DEBUG [Thread-2127 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5f7c40ba to 127.0.0.1:50078 2024-12-11T04:28:44,476 DEBUG [Thread-2127 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:28:44,476 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-11T04:28:44,476 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 69 2024-12-11T04:28:44,476 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 67 2024-12-11T04:28:44,476 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 66 2024-12-11T04:28:44,476 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 76 2024-12-11T04:28:44,476 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 75 2024-12-11T04:28:44,476 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-11T04:28:44,476 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-11T04:28:44,476 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2905 2024-12-11T04:28:44,476 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8715 rows 2024-12-11T04:28:44,476 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2917 2024-12-11T04:28:44,476 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8751 rows 2024-12-11T04:28:44,476 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2921 2024-12-11T04:28:44,477 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8763 rows 2024-12-11T04:28:44,477 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2915 2024-12-11T04:28:44,477 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8745 rows 2024-12-11T04:28:44,477 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2914 2024-12-11T04:28:44,477 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8742 rows 2024-12-11T04:28:44,477 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-11T04:28:44,477 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7d0ab200 to 127.0.0.1:50078 2024-12-11T04:28:44,477 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:28:44,479 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-11T04:28:44,480 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-11T04:28:44,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=160, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-11T04:28:44,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=160 2024-12-11T04:28:44,483 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733891324483"}]},"ts":"1733891324483"} 2024-12-11T04:28:44,484 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-11T04:28:44,487 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-11T04:28:44,487 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=161, ppid=160, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-11T04:28:44,488 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=162, ppid=161, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=c171d7ccfa412c571490e92799f7df2c, UNASSIGN}] 2024-12-11T04:28:44,488 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=162, ppid=161, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=c171d7ccfa412c571490e92799f7df2c, UNASSIGN 2024-12-11T04:28:44,489 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=162 updating hbase:meta row=c171d7ccfa412c571490e92799f7df2c, regionState=CLOSING, regionLocation=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:44,490 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T04:28:44,490 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=163, ppid=162, state=RUNNABLE; CloseRegionProcedure c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267}] 2024-12-11T04:28:44,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=160 2024-12-11T04:28:44,625 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/5233c8d17d154e2a9865242c07de76d7 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/5233c8d17d154e2a9865242c07de76d7 2024-12-11T04:28:44,628 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in c171d7ccfa412c571490e92799f7df2c/C of c171d7ccfa412c571490e92799f7df2c into 5233c8d17d154e2a9865242c07de76d7(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
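Note on the disable sequence above: DisableTableProcedure (pid=160) spawns CloseTableRegionsProcedure (pid=161), then TransitRegionStateProcedure (pid=162, UNASSIGN) and CloseRegionProcedure (pid=163), while the RPC handler repeatedly checks whether pid=160 is done. That is the master-side half of a client disable-table request. A minimal client-side sketch, assuming a standard HBase 2.x client and a reachable cluster; nothing below is taken from this log beyond the table name:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableExample {
  public static void main(String[] args) throws Exception {
    // Assumed setup: an hbase-site.xml on the classpath that points at the cluster.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      if (!admin.isTableDisabled(table)) {
        // Submits the DisableTableProcedure on the master and blocks until it
        // completes, which is what the repeated "Checking to see if procedure
        // is done pid=160" entries above correspond to on the server side.
        admin.disableTable(table);
      }
    }
  }
}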
2024-12-11T04:28:44,628 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:44,628 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c., storeName=c171d7ccfa412c571490e92799f7df2c/C, priority=13, startTime=1733891323793; duration=0sec 2024-12-11T04:28:44,628 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:44,628 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c171d7ccfa412c571490e92799f7df2c:C 2024-12-11T04:28:44,640 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:44,640 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] handler.UnassignRegionHandler(124): Close c171d7ccfa412c571490e92799f7df2c 2024-12-11T04:28:44,640 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-11T04:28:44,640 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1681): Closing c171d7ccfa412c571490e92799f7df2c, disabling compactions & flushes 2024-12-11T04:28:44,640 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:44,640 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 2024-12-11T04:28:44,640 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. after waiting 0 ms 2024-12-11T04:28:44,640 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 
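Note on the region close above: the handler disables compactions and flushes, waits for the close lock, acquires it, and disables updates before flushing what remains in the memstore. As a rough illustration of that lock-then-flush pattern only (the class and method names below are invented for the sketch; this is not HBase's HRegion implementation):

import java.util.concurrent.locks.ReentrantReadWriteLock;

class ClosableStore {
  private final ReentrantReadWriteLock closeLock = new ReentrantReadWriteLock();
  private volatile boolean writesEnabled = true;
  private long memstoreBytes = 0;

  // Writers take the read side of the lock, so they exclude close() but not each other.
  void put(byte[] row, byte[] value) {
    closeLock.readLock().lock();
    try {
      if (!writesEnabled) {
        throw new IllegalStateException("region is closing");
      }
      memstoreBytes += row.length + value.length;
    } finally {
      closeLock.readLock().unlock();
    }
  }

  // Close takes the write side: once held, no new puts can start.
  void close() {
    closeLock.writeLock().lock();   // analogous to "Acquired close lock ... after waiting N ms"
    try {
      writesEnabled = false;        // analogous to "Updates disabled for region ..."
      flushRemaining();             // analogous to "Flushing 3/3 column families ..."
    } finally {
      closeLock.writeLock().unlock();
    }
  }

  private void flushRemaining() {
    // The real system writes the memstore out as store files; the sketch just resets the counter.
    memstoreBytes = 0;
  }
}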
2024-12-11T04:28:44,640 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(2837): Flushing c171d7ccfa412c571490e92799f7df2c 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-11T04:28:44,641 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=A 2024-12-11T04:28:44,641 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:44,641 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=B 2024-12-11T04:28:44,641 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:44,641 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.CompactingMemStore(205): FLUSHING TO DISK c171d7ccfa412c571490e92799f7df2c, store=C 2024-12-11T04:28:44,641 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:44,644 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/d6590f15dc0d41698ba9c7889bd46460 is 50, key is test_row_0/A:col10/1733891324475/Put/seqid=0 2024-12-11T04:28:44,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742402_1578 (size=12301) 2024-12-11T04:28:44,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=160 2024-12-11T04:28:45,047 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=457 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/d6590f15dc0d41698ba9c7889bd46460 2024-12-11T04:28:45,052 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/e9390701ef6d464a99484d24b281aa32 is 50, key is test_row_0/B:col10/1733891324475/Put/seqid=0 2024-12-11T04:28:45,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742403_1579 (size=12301) 2024-12-11T04:28:45,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=160 2024-12-11T04:28:45,456 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 
{event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=457 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/e9390701ef6d464a99484d24b281aa32 2024-12-11T04:28:45,461 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/22b6c32ef57f4a4b8ab53a0ddd0dbd78 is 50, key is test_row_0/C:col10/1733891324475/Put/seqid=0 2024-12-11T04:28:45,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742404_1580 (size=12301) 2024-12-11T04:28:45,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=160 2024-12-11T04:28:45,865 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=457 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/22b6c32ef57f4a4b8ab53a0ddd0dbd78 2024-12-11T04:28:45,868 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/A/d6590f15dc0d41698ba9c7889bd46460 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/d6590f15dc0d41698ba9c7889bd46460 2024-12-11T04:28:45,870 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/d6590f15dc0d41698ba9c7889bd46460, entries=150, sequenceid=457, filesize=12.0 K 2024-12-11T04:28:45,871 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/B/e9390701ef6d464a99484d24b281aa32 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/e9390701ef6d464a99484d24b281aa32 2024-12-11T04:28:45,873 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/e9390701ef6d464a99484d24b281aa32, entries=150, sequenceid=457, filesize=12.0 K 2024-12-11T04:28:45,873 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/.tmp/C/22b6c32ef57f4a4b8ab53a0ddd0dbd78 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/22b6c32ef57f4a4b8ab53a0ddd0dbd78 2024-12-11T04:28:45,876 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/22b6c32ef57f4a4b8ab53a0ddd0dbd78, entries=150, sequenceid=457, filesize=12.0 K 2024-12-11T04:28:45,876 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for c171d7ccfa412c571490e92799f7df2c in 1236ms, sequenceid=457, compaction requested=false 2024-12-11T04:28:45,877 DEBUG [StoreCloser-TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/17147dc5f3ef4e5d92fab965424068dc, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/fd85af27b0064e55a34f7d9213c5d1cf, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/b3e1e62199d14af28c42f83ae9fa1ec9, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/78384c01f5d4424b9048099a0412c324, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/24beb141f8564751bcba3fb688c2e369, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/ac60cf650fd24cf094d97dcfa9e6829f, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/977b52fabd55432cba1c6f7813ec46fb, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/73ffc29eac9a4eeeaba2051022d63f96, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/b61fccd5b682428397dae6cddf25a3b5, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/abe5c04fbc5a4302a356c44ada194790, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/a009e5e3b8b542db854fb57ff1102d9f, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/e2a9b215d467461abae154b38f946cb7, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/304cd594bb334e189a6da7025c4f596d, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/8beec5f9d12a4746b59447f72bd7a3af, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/c23a838bbf634a9f85e4083528b1ce8e, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/2753f9a15ca444bb80206a319a375a7c, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/224f777bdf154badb492e03d23fd40f2, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/0dcda12cc299463d9d4a39d1a0641eb7, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/cd288610dc4548e28b1400c7ee0c27ec, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/e19231da05cc4daa90b87226803b637f, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/cb5652319b094c039ee17babb5ee8df0, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/cf07d7463f3d4eb0a617ba37bc3bf2dc, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/5612d87b735c4590819278073744e492, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/815e142daeed43b289f10284e24261cc, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/b19933c4eabe485f9425844a14d171fa, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/8d80b56b0684485c80dfe77e42021862, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/7b83620fcd224e2eb2b047e8604282a8, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/fff4b7cd099b469fbbad89ddf9711a39, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/4a3c72c988b749a784d244183e3253a9, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/00e9d8eb5e1e4f2891617adf77e695ae, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/857560d0019d44f7b9bfb18a89c591b8, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/42c2b32fc5f64eb4bfcef8caf69f8f25] to archive 2024-12-11T04:28:45,878 DEBUG [StoreCloser-TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-11T04:28:45,879 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/fd85af27b0064e55a34f7d9213c5d1cf to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/fd85af27b0064e55a34f7d9213c5d1cf 2024-12-11T04:28:45,879 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/24beb141f8564751bcba3fb688c2e369 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/24beb141f8564751bcba3fb688c2e369 2024-12-11T04:28:45,880 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/b3e1e62199d14af28c42f83ae9fa1ec9 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/b3e1e62199d14af28c42f83ae9fa1ec9 2024-12-11T04:28:45,880 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/17147dc5f3ef4e5d92fab965424068dc to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/17147dc5f3ef4e5d92fab965424068dc 2024-12-11T04:28:45,880 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/78384c01f5d4424b9048099a0412c324 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/78384c01f5d4424b9048099a0412c324 2024-12-11T04:28:45,880 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/73ffc29eac9a4eeeaba2051022d63f96 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/73ffc29eac9a4eeeaba2051022d63f96 2024-12-11T04:28:45,880 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/ac60cf650fd24cf094d97dcfa9e6829f to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/ac60cf650fd24cf094d97dcfa9e6829f 2024-12-11T04:28:45,880 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/977b52fabd55432cba1c6f7813ec46fb to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/977b52fabd55432cba1c6f7813ec46fb 2024-12-11T04:28:45,881 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/b61fccd5b682428397dae6cddf25a3b5 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/b61fccd5b682428397dae6cddf25a3b5 2024-12-11T04:28:45,881 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/abe5c04fbc5a4302a356c44ada194790 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/abe5c04fbc5a4302a356c44ada194790 2024-12-11T04:28:45,881 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/e2a9b215d467461abae154b38f946cb7 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/e2a9b215d467461abae154b38f946cb7 2024-12-11T04:28:45,881 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/a009e5e3b8b542db854fb57ff1102d9f to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/a009e5e3b8b542db854fb57ff1102d9f 2024-12-11T04:28:45,881 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/304cd594bb334e189a6da7025c4f596d to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/304cd594bb334e189a6da7025c4f596d 2024-12-11T04:28:45,882 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/8beec5f9d12a4746b59447f72bd7a3af to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/8beec5f9d12a4746b59447f72bd7a3af 2024-12-11T04:28:45,882 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/c23a838bbf634a9f85e4083528b1ce8e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/c23a838bbf634a9f85e4083528b1ce8e 2024-12-11T04:28:45,882 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/2753f9a15ca444bb80206a319a375a7c to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/2753f9a15ca444bb80206a319a375a7c 2024-12-11T04:28:45,883 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/224f777bdf154badb492e03d23fd40f2 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/224f777bdf154badb492e03d23fd40f2 2024-12-11T04:28:45,883 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/cd288610dc4548e28b1400c7ee0c27ec to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/cd288610dc4548e28b1400c7ee0c27ec 2024-12-11T04:28:45,883 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/e19231da05cc4daa90b87226803b637f to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/e19231da05cc4daa90b87226803b637f 2024-12-11T04:28:45,883 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/cb5652319b094c039ee17babb5ee8df0 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/cb5652319b094c039ee17babb5ee8df0 2024-12-11T04:28:45,883 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/5612d87b735c4590819278073744e492 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/5612d87b735c4590819278073744e492 2024-12-11T04:28:45,883 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/0dcda12cc299463d9d4a39d1a0641eb7 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/0dcda12cc299463d9d4a39d1a0641eb7 2024-12-11T04:28:45,883 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/cf07d7463f3d4eb0a617ba37bc3bf2dc to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/cf07d7463f3d4eb0a617ba37bc3bf2dc 2024-12-11T04:28:45,884 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/815e142daeed43b289f10284e24261cc to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/815e142daeed43b289f10284e24261cc 2024-12-11T04:28:45,885 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/b19933c4eabe485f9425844a14d171fa to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/b19933c4eabe485f9425844a14d171fa 2024-12-11T04:28:45,885 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/4a3c72c988b749a784d244183e3253a9 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/4a3c72c988b749a784d244183e3253a9 2024-12-11T04:28:45,885 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/fff4b7cd099b469fbbad89ddf9711a39 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/fff4b7cd099b469fbbad89ddf9711a39 2024-12-11T04:28:45,885 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/00e9d8eb5e1e4f2891617adf77e695ae to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/00e9d8eb5e1e4f2891617adf77e695ae 2024-12-11T04:28:45,885 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/8d80b56b0684485c80dfe77e42021862 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/8d80b56b0684485c80dfe77e42021862 2024-12-11T04:28:45,885 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/7b83620fcd224e2eb2b047e8604282a8 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/7b83620fcd224e2eb2b047e8604282a8 2024-12-11T04:28:45,885 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/857560d0019d44f7b9bfb18a89c591b8 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/857560d0019d44f7b9bfb18a89c591b8 2024-12-11T04:28:45,885 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/42c2b32fc5f64eb4bfcef8caf69f8f25 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/42c2b32fc5f64eb4bfcef8caf69f8f25 2024-12-11T04:28:45,887 DEBUG [StoreCloser-TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/e11d72139a1f4fb5bd13447e502d0919, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/a7dc37b01c1846b6a71b19903b91ebd3, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/5d238cffce2f431aa40f7464d53cf5ad, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/30c6c3dc44884d9d80977cc5e5ebe8e0, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/51ec0d62228d4a1cb5d927ad7d0b8494, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/331b19d0e37c441aabaab2c560b4b039, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/d94b94177ee247498d937a60a34a519b, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/03e387fd5ed4462a83356706896c69c8, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/5c7ffe367a3c4048af5cbd038f3f64f0, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/5eeb77fdcf1546f4a22d0f6494c7c9e0, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/1011caed1e34447da1ce8941ecd387ba, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/b464acbbd0e84a0f9cdbc924be8dd7f9, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/0c4c7286d4c64fd8aef58bce32b0041a, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/e989927c02324740b92c7d4b282de356, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/c71932644de04a919e2854dfc52043af, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/3fe1fe5356294be9a845e21a6577c308, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/b9845fc231d348dba09401b55c91142c, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/68197d217bc046098db985188da56474, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/5f52cef444d84371baaf16ae5d22fda9, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/1b45ce39dfd946c38011dd8c4be14934, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/cd87fa4a85214ce4ad5302a73c21398f, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/4e016c705d96478399ec98dbe0ba9e64, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/e80c42f25227404ba73d35f19ad5a34e, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/7f1304bb30a34364b4ab1420998a6272, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/d6a793c1196846e282a364bc5fe1162b, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/2e49d08532d848ba966e380621a7d094, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/5dee19a1c2fd47bdbf0b4ed83f7c4f98, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/e58b4744126c471b98146ab46c17be91, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/8963d413ceff413c9d34a04b9fb01749, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/68f96124229947c1abfb220ba32c4c2e, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/9e76ddecd5c74f8f9dc9eadaa6ed8e9e, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/a1525925ec2b45bf9ac6a40f54151c05] to archive 2024-12-11T04:28:45,887 DEBUG [StoreCloser-TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-11T04:28:45,889 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/e11d72139a1f4fb5bd13447e502d0919 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/e11d72139a1f4fb5bd13447e502d0919 2024-12-11T04:28:45,889 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/30c6c3dc44884d9d80977cc5e5ebe8e0 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/30c6c3dc44884d9d80977cc5e5ebe8e0 2024-12-11T04:28:45,889 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/a7dc37b01c1846b6a71b19903b91ebd3 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/a7dc37b01c1846b6a71b19903b91ebd3 2024-12-11T04:28:45,889 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/5d238cffce2f431aa40f7464d53cf5ad to 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/5d238cffce2f431aa40f7464d53cf5ad 2024-12-11T04:28:45,889 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/03e387fd5ed4462a83356706896c69c8 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/03e387fd5ed4462a83356706896c69c8 2024-12-11T04:28:45,889 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/d94b94177ee247498d937a60a34a519b to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/d94b94177ee247498d937a60a34a519b 2024-12-11T04:28:45,889 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/51ec0d62228d4a1cb5d927ad7d0b8494 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/51ec0d62228d4a1cb5d927ad7d0b8494 2024-12-11T04:28:45,889 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/331b19d0e37c441aabaab2c560b4b039 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/331b19d0e37c441aabaab2c560b4b039 2024-12-11T04:28:45,890 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/5c7ffe367a3c4048af5cbd038f3f64f0 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/5c7ffe367a3c4048af5cbd038f3f64f0 2024-12-11T04:28:45,891 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/1011caed1e34447da1ce8941ecd387ba to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/1011caed1e34447da1ce8941ecd387ba 2024-12-11T04:28:45,891 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/5eeb77fdcf1546f4a22d0f6494c7c9e0 to 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/5eeb77fdcf1546f4a22d0f6494c7c9e0 2024-12-11T04:28:45,891 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/b464acbbd0e84a0f9cdbc924be8dd7f9 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/b464acbbd0e84a0f9cdbc924be8dd7f9 2024-12-11T04:28:45,891 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/c71932644de04a919e2854dfc52043af to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/c71932644de04a919e2854dfc52043af 2024-12-11T04:28:45,891 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/e989927c02324740b92c7d4b282de356 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/e989927c02324740b92c7d4b282de356 2024-12-11T04:28:45,891 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/3fe1fe5356294be9a845e21a6577c308 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/3fe1fe5356294be9a845e21a6577c308 2024-12-11T04:28:45,891 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/0c4c7286d4c64fd8aef58bce32b0041a to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/0c4c7286d4c64fd8aef58bce32b0041a 2024-12-11T04:28:45,893 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/b9845fc231d348dba09401b55c91142c to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/b9845fc231d348dba09401b55c91142c 2024-12-11T04:28:45,893 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/68197d217bc046098db985188da56474 to 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/68197d217bc046098db985188da56474 2024-12-11T04:28:45,893 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/1b45ce39dfd946c38011dd8c4be14934 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/1b45ce39dfd946c38011dd8c4be14934 2024-12-11T04:28:45,893 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/5f52cef444d84371baaf16ae5d22fda9 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/5f52cef444d84371baaf16ae5d22fda9 2024-12-11T04:28:45,893 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/cd87fa4a85214ce4ad5302a73c21398f to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/cd87fa4a85214ce4ad5302a73c21398f 2024-12-11T04:28:45,893 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/e80c42f25227404ba73d35f19ad5a34e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/e80c42f25227404ba73d35f19ad5a34e 2024-12-11T04:28:45,893 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/4e016c705d96478399ec98dbe0ba9e64 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/4e016c705d96478399ec98dbe0ba9e64 2024-12-11T04:28:45,893 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/7f1304bb30a34364b4ab1420998a6272 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/7f1304bb30a34364b4ab1420998a6272 2024-12-11T04:28:45,894 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/d6a793c1196846e282a364bc5fe1162b to 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/d6a793c1196846e282a364bc5fe1162b 2024-12-11T04:28:45,894 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/5dee19a1c2fd47bdbf0b4ed83f7c4f98 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/5dee19a1c2fd47bdbf0b4ed83f7c4f98 2024-12-11T04:28:45,894 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/2e49d08532d848ba966e380621a7d094 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/2e49d08532d848ba966e380621a7d094 2024-12-11T04:28:45,895 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/68f96124229947c1abfb220ba32c4c2e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/68f96124229947c1abfb220ba32c4c2e 2024-12-11T04:28:45,895 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/8963d413ceff413c9d34a04b9fb01749 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/8963d413ceff413c9d34a04b9fb01749 2024-12-11T04:28:45,895 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/e58b4744126c471b98146ab46c17be91 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/e58b4744126c471b98146ab46c17be91 2024-12-11T04:28:45,895 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/a1525925ec2b45bf9ac6a40f54151c05 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/a1525925ec2b45bf9ac6a40f54151c05 2024-12-11T04:28:45,895 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/9e76ddecd5c74f8f9dc9eadaa6ed8e9e to 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/9e76ddecd5c74f8f9dc9eadaa6ed8e9e 2024-12-11T04:28:45,896 DEBUG [StoreCloser-TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/06c0704f1031404f8ab19fd4dc28ac2c, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/09668b35a13a43c39949da2e23b4a2b0, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/1c4fb582ab90432bbf49837841680910, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/30ba8118544e44408d82836a96afa319, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/834e4651361c4567904ae716abb714ff, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/de9be0a1b3bb4ed5b62b72ff989e262f, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/7a035bcfef664a558c9a816729250a9d, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/0761b08dc4874533b10bbf8d5a1f25a4, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/aeafff1dbbc9484f8f9b76c07ecc429f, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/bbd96fe3a33f4be9a01c628683bb2f89, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/ab843211b9444132bac014fa9e8892f5, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/350f4459821a4110960c859cfc1af35e, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/9bc61d16b36e441b858e08ac2c7d2493, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/db8bf3da5c8c476da6971ff255de19f8, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/0525998891e14a9eaa8ceeef0254bd6f, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/2a769496e32848c59434ed32a46f6273, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/b41d85bae2e64bc989e52128e9c6d118, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/aedf5cbbc42f40518ce67aaaefa1f62e, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/f8b2eb2b6dc94b0da9b78103683ae6a6, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/ec064d0c2e3c420a97f2287fcca69fe0, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/8776c16f2a4446b8ab62baf097fa5a64, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/772e6cc729a64ae59d11803badfc1fe9, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/c8c9078feb4b49adad7cded71a3eda46, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/32ccfed076c24929ae68292f851e7475, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/6a87a403692448129d937e3f1e3b06f8, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/63e60deecf1a42efb9f14b1a40201992, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/5ddc236dd9604490bc4f68b799aa953d, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/e87fa2da298e48a19935393265f4d12d, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/de882b740a8a471fbaeda3cf919a3972, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/75d279b3ddae44f3a70b0482e74001a3, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/5e81c96c9313468b8e88e94d922f9d90, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/f15372c6667a4343b591f88daa8778e5] to archive 2024-12-11T04:28:45,897 DEBUG [StoreCloser-TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
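Every archiver entry in this stretch follows the same pattern: a store file is moved from the region's data/ directory to the parallel path under archive/, preserving the table/region/family layout. Below is a minimal, hypothetical sketch of that move using Hadoop's public FileSystem API; it is not the actual org.apache.hadoop.hbase.backup.HFileArchiver implementation, and the helper name and root-dir handling are assumptions for illustration only.

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical helper mirroring the data/ -> archive/ moves logged above.
final class ArchiveMoveSketch {
  static void archive(FileSystem fs, Path rootDir, Path storeFile) throws IOException {
    // storeFile: <rootDir>/data/default/TestAcidGuarantees/<region>/<cf>/<hfile>
    String root = rootDir.toUri().getPath();
    String relative = storeFile.toUri().getPath().substring(root.length() + 1);
    Path target = new Path(new Path(rootDir, "archive"), relative);
    fs.mkdirs(target.getParent());        // create archive/.../<cf>/ if missing
    if (!fs.rename(storeFile, target)) {  // a rename within HDFS, not a copy
      throw new IOException("Could not archive " + storeFile + " to " + target);
    }
  }
}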
2024-12-11T04:28:45,899 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/09668b35a13a43c39949da2e23b4a2b0 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/09668b35a13a43c39949da2e23b4a2b0 2024-12-11T04:28:45,899 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/30ba8118544e44408d82836a96afa319 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/30ba8118544e44408d82836a96afa319 2024-12-11T04:28:45,899 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/1c4fb582ab90432bbf49837841680910 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/1c4fb582ab90432bbf49837841680910 2024-12-11T04:28:45,899 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/06c0704f1031404f8ab19fd4dc28ac2c to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/06c0704f1031404f8ab19fd4dc28ac2c 2024-12-11T04:28:45,899 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/de9be0a1b3bb4ed5b62b72ff989e262f to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/de9be0a1b3bb4ed5b62b72ff989e262f 2024-12-11T04:28:45,900 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/834e4651361c4567904ae716abb714ff to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/834e4651361c4567904ae716abb714ff 2024-12-11T04:28:45,900 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/0761b08dc4874533b10bbf8d5a1f25a4 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/0761b08dc4874533b10bbf8d5a1f25a4 2024-12-11T04:28:45,900 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/7a035bcfef664a558c9a816729250a9d to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/7a035bcfef664a558c9a816729250a9d 2024-12-11T04:28:45,903 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/bbd96fe3a33f4be9a01c628683bb2f89 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/bbd96fe3a33f4be9a01c628683bb2f89 2024-12-11T04:28:45,903 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/aeafff1dbbc9484f8f9b76c07ecc429f to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/aeafff1dbbc9484f8f9b76c07ecc429f 2024-12-11T04:28:45,903 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/ab843211b9444132bac014fa9e8892f5 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/ab843211b9444132bac014fa9e8892f5 2024-12-11T04:28:45,903 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/9bc61d16b36e441b858e08ac2c7d2493 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/9bc61d16b36e441b858e08ac2c7d2493 2024-12-11T04:28:45,903 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/350f4459821a4110960c859cfc1af35e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/350f4459821a4110960c859cfc1af35e 2024-12-11T04:28:45,903 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/db8bf3da5c8c476da6971ff255de19f8 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/db8bf3da5c8c476da6971ff255de19f8 2024-12-11T04:28:45,903 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/0525998891e14a9eaa8ceeef0254bd6f to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/0525998891e14a9eaa8ceeef0254bd6f 2024-12-11T04:28:45,903 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/2a769496e32848c59434ed32a46f6273 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/2a769496e32848c59434ed32a46f6273 2024-12-11T04:28:45,908 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/b41d85bae2e64bc989e52128e9c6d118 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/b41d85bae2e64bc989e52128e9c6d118 2024-12-11T04:28:45,909 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/aedf5cbbc42f40518ce67aaaefa1f62e to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/aedf5cbbc42f40518ce67aaaefa1f62e 2024-12-11T04:28:45,909 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/8776c16f2a4446b8ab62baf097fa5a64 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/8776c16f2a4446b8ab62baf097fa5a64 2024-12-11T04:28:45,909 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/f8b2eb2b6dc94b0da9b78103683ae6a6 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/f8b2eb2b6dc94b0da9b78103683ae6a6 2024-12-11T04:28:45,909 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/ec064d0c2e3c420a97f2287fcca69fe0 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/ec064d0c2e3c420a97f2287fcca69fe0 2024-12-11T04:28:45,909 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/772e6cc729a64ae59d11803badfc1fe9 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/772e6cc729a64ae59d11803badfc1fe9 2024-12-11T04:28:45,909 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/c8c9078feb4b49adad7cded71a3eda46 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/c8c9078feb4b49adad7cded71a3eda46 2024-12-11T04:28:45,910 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/32ccfed076c24929ae68292f851e7475 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/32ccfed076c24929ae68292f851e7475 2024-12-11T04:28:45,910 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/6a87a403692448129d937e3f1e3b06f8 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/6a87a403692448129d937e3f1e3b06f8 2024-12-11T04:28:45,910 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/63e60deecf1a42efb9f14b1a40201992 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/63e60deecf1a42efb9f14b1a40201992 2024-12-11T04:28:45,910 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/e87fa2da298e48a19935393265f4d12d to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/e87fa2da298e48a19935393265f4d12d 2024-12-11T04:28:45,911 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/5ddc236dd9604490bc4f68b799aa953d to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/5ddc236dd9604490bc4f68b799aa953d 2024-12-11T04:28:45,911 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/de882b740a8a471fbaeda3cf919a3972 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/de882b740a8a471fbaeda3cf919a3972 2024-12-11T04:28:45,911 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/5e81c96c9313468b8e88e94d922f9d90 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/5e81c96c9313468b8e88e94d922f9d90 2024-12-11T04:28:45,911 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/75d279b3ddae44f3a70b0482e74001a3 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/75d279b3ddae44f3a70b0482e74001a3 2024-12-11T04:28:45,911 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/f15372c6667a4343b591f88daa8778e5 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/f15372c6667a4343b591f88daa8778e5 2024-12-11T04:28:45,914 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/recovered.edits/460.seqid, newMaxSeqId=460, maxSeqId=1 2024-12-11T04:28:45,915 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c. 
2024-12-11T04:28:45,915 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1635): Region close journal for c171d7ccfa412c571490e92799f7df2c: 2024-12-11T04:28:45,916 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] handler.UnassignRegionHandler(170): Closed c171d7ccfa412c571490e92799f7df2c 2024-12-11T04:28:45,916 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=162 updating hbase:meta row=c171d7ccfa412c571490e92799f7df2c, regionState=CLOSED 2024-12-11T04:28:45,918 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=163, resume processing ppid=162 2024-12-11T04:28:45,918 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, ppid=162, state=SUCCESS; CloseRegionProcedure c171d7ccfa412c571490e92799f7df2c, server=5f466b3719ec,39071,1733891180267 in 1.4270 sec 2024-12-11T04:28:45,919 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=162, resume processing ppid=161 2024-12-11T04:28:45,919 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, ppid=161, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=c171d7ccfa412c571490e92799f7df2c, UNASSIGN in 1.4300 sec 2024-12-11T04:28:45,920 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=161, resume processing ppid=160 2024-12-11T04:28:45,920 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, ppid=160, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.4320 sec 2024-12-11T04:28:45,921 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733891325921"}]},"ts":"1733891325921"} 2024-12-11T04:28:45,922 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-11T04:28:45,924 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-11T04:28:45,925 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.4440 sec 2024-12-11T04:28:46,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=160 2024-12-11T04:28:46,586 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 160 completed 2024-12-11T04:28:46,587 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-11T04:28:46,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=164, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T04:28:46,588 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=164, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T04:28:46,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=164 2024-12-11T04:28:46,589 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=164, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T04:28:46,590 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c 2024-12-11T04:28:46,592 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A, FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B, FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C, FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/recovered.edits] 2024-12-11T04:28:46,594 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/d6590f15dc0d41698ba9c7889bd46460 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/d6590f15dc0d41698ba9c7889bd46460 2024-12-11T04:28:46,595 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/374394fb9215499982e553374dff06f2 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/A/374394fb9215499982e553374dff06f2 2024-12-11T04:28:46,597 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/e9390701ef6d464a99484d24b281aa32 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/e9390701ef6d464a99484d24b281aa32 2024-12-11T04:28:46,597 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/151ec97fcc4f423099dba6bd366d3ffb to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/B/151ec97fcc4f423099dba6bd366d3ffb 2024-12-11T04:28:46,599 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/5233c8d17d154e2a9865242c07de76d7 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/5233c8d17d154e2a9865242c07de76d7 
2024-12-11T04:28:46,599 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/22b6c32ef57f4a4b8ab53a0ddd0dbd78 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/C/22b6c32ef57f4a4b8ab53a0ddd0dbd78 2024-12-11T04:28:46,601 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/recovered.edits/460.seqid to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c/recovered.edits/460.seqid 2024-12-11T04:28:46,601 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/c171d7ccfa412c571490e92799f7df2c 2024-12-11T04:28:46,601 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-11T04:28:46,603 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=164, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T04:28:46,604 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-11T04:28:46,605 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-11T04:28:46,606 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=164, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T04:28:46,606 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-11T04:28:46,606 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733891326606"}]},"ts":"9223372036854775807"} 2024-12-11T04:28:46,607 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-11T04:28:46,607 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => c171d7ccfa412c571490e92799f7df2c, NAME => 'TestAcidGuarantees,,1733891300304.c171d7ccfa412c571490e92799f7df2c.', STARTKEY => '', ENDKEY => ''}] 2024-12-11T04:28:46,607 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 
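The DISABLE and DELETE procedures traced in the surrounding entries (pid=160 and pid=164) are the master-side view of two ordinary client calls. A minimal client-side sketch, assuming an hbase-site.xml on the classpath that points at this cluster:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      if (admin.tableExists(table)) {
        admin.disableTable(table);  // drives a DisableTableProcedure like pid=160 above
        admin.deleteTable(table);   // drives a DeleteTableProcedure like pid=164, which archives the region dirs
      }
    }
  }
}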
2024-12-11T04:28:46,607 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733891326607"}]},"ts":"9223372036854775807"} 2024-12-11T04:28:46,608 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-11T04:28:46,610 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=164, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T04:28:46,611 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 23 msec 2024-12-11T04:28:46,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=164 2024-12-11T04:28:46,689 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 164 completed 2024-12-11T04:28:46,699 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=246 (was 246), OpenFileDescriptor=454 (was 461), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=390 (was 379) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3499 (was 3522) 2024-12-11T04:28:46,708 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=246, OpenFileDescriptor=454, MaxFileDescriptor=1048576, SystemLoadAverage=390, ProcessCount=11, AvailableMemoryMB=3499 2024-12-11T04:28:46,709 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-12-11T04:28:46,710 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T04:28:46,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=165, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-11T04:28:46,711 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=165, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T04:28:46,711 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:46,711 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 165 2024-12-11T04:28:46,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-11T04:28:46,712 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=165, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T04:28:46,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742405_1581 (size=963) 2024-12-11T04:28:46,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-11T04:28:47,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-11T04:28:47,117 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5 2024-12-11T04:28:47,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742406_1582 (size=53) 2024-12-11T04:28:47,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-11T04:28:47,522 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T04:28:47,523 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 697517215fc3b7180eb3ba48942407dc, disabling compactions & flushes 2024-12-11T04:28:47,523 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:47,523 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:47,523 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. after waiting 0 ms 2024-12-11T04:28:47,523 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:47,523 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 
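The table descriptor echoed in the create request above (three families A/B/C, one version each, table-level 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE') maps directly onto the standard 2.x builder API. A rough, hypothetical equivalent of the test's setup call, with the per-family defaults omitted:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

class CreateTableSketch {
  static void create(Admin admin) throws Exception {
    TableDescriptorBuilder builder = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        // table-level attribute shown in the log; selects the ADAPTIVE compacting memstore
        .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
    for (String cf : new String[] { "A", "B", "C" }) {
      builder.setColumnFamily(ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes(cf))
          .setMaxVersions(1)   // VERSIONS => '1' in the descriptor above
          .build());
    }
    admin.createTable(builder.build());  // -> CreateTableProcedure (pid=165 in the log)
  }
}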
2024-12-11T04:28:47,523 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 697517215fc3b7180eb3ba48942407dc: 2024-12-11T04:28:47,524 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=165, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T04:28:47,524 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733891327524"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733891327524"}]},"ts":"1733891327524"} 2024-12-11T04:28:47,525 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-11T04:28:47,525 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=165, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T04:28:47,525 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733891327525"}]},"ts":"1733891327525"} 2024-12-11T04:28:47,526 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-11T04:28:47,530 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=697517215fc3b7180eb3ba48942407dc, ASSIGN}] 2024-12-11T04:28:47,530 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=166, ppid=165, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=697517215fc3b7180eb3ba48942407dc, ASSIGN 2024-12-11T04:28:47,531 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=166, ppid=165, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=697517215fc3b7180eb3ba48942407dc, ASSIGN; state=OFFLINE, location=5f466b3719ec,39071,1733891180267; forceNewPlan=false, retain=false 2024-12-11T04:28:47,681 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=166 updating hbase:meta row=697517215fc3b7180eb3ba48942407dc, regionState=OPENING, regionLocation=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:47,682 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=167, ppid=166, state=RUNNABLE; OpenRegionProcedure 697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267}] 2024-12-11T04:28:47,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-11T04:28:47,834 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:47,836 INFO [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 
2024-12-11T04:28:47,836 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] regionserver.HRegion(7285): Opening region: {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} 2024-12-11T04:28:47,837 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 697517215fc3b7180eb3ba48942407dc 2024-12-11T04:28:47,837 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T04:28:47,837 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] regionserver.HRegion(7327): checking encryption for 697517215fc3b7180eb3ba48942407dc 2024-12-11T04:28:47,837 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] regionserver.HRegion(7330): checking classloading for 697517215fc3b7180eb3ba48942407dc 2024-12-11T04:28:47,838 INFO [StoreOpener-697517215fc3b7180eb3ba48942407dc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 697517215fc3b7180eb3ba48942407dc 2024-12-11T04:28:47,839 INFO [StoreOpener-697517215fc3b7180eb3ba48942407dc-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T04:28:47,839 INFO [StoreOpener-697517215fc3b7180eb3ba48942407dc-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 697517215fc3b7180eb3ba48942407dc columnFamilyName A 2024-12-11T04:28:47,839 DEBUG [StoreOpener-697517215fc3b7180eb3ba48942407dc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:47,840 INFO [StoreOpener-697517215fc3b7180eb3ba48942407dc-1 {}] regionserver.HStore(327): Store=697517215fc3b7180eb3ba48942407dc/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T04:28:47,840 INFO [StoreOpener-697517215fc3b7180eb3ba48942407dc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 697517215fc3b7180eb3ba48942407dc 2024-12-11T04:28:47,841 INFO [StoreOpener-697517215fc3b7180eb3ba48942407dc-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T04:28:47,841 INFO [StoreOpener-697517215fc3b7180eb3ba48942407dc-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 697517215fc3b7180eb3ba48942407dc columnFamilyName B 2024-12-11T04:28:47,841 DEBUG [StoreOpener-697517215fc3b7180eb3ba48942407dc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:47,841 INFO [StoreOpener-697517215fc3b7180eb3ba48942407dc-1 {}] regionserver.HStore(327): Store=697517215fc3b7180eb3ba48942407dc/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T04:28:47,841 INFO [StoreOpener-697517215fc3b7180eb3ba48942407dc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 697517215fc3b7180eb3ba48942407dc 2024-12-11T04:28:47,842 INFO [StoreOpener-697517215fc3b7180eb3ba48942407dc-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T04:28:47,842 INFO [StoreOpener-697517215fc3b7180eb3ba48942407dc-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 697517215fc3b7180eb3ba48942407dc columnFamilyName C 2024-12-11T04:28:47,842 DEBUG [StoreOpener-697517215fc3b7180eb3ba48942407dc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:47,842 INFO [StoreOpener-697517215fc3b7180eb3ba48942407dc-1 {}] regionserver.HStore(327): Store=697517215fc3b7180eb3ba48942407dc/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T04:28:47,842 INFO [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:47,843 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc 2024-12-11T04:28:47,843 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc 2024-12-11T04:28:47,844 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-11T04:28:47,845 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] regionserver.HRegion(1085): writing seq id for 697517215fc3b7180eb3ba48942407dc 2024-12-11T04:28:47,846 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T04:28:47,847 INFO [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] regionserver.HRegion(1102): Opened 697517215fc3b7180eb3ba48942407dc; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74814865, jitterRate=0.11482836306095123}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-11T04:28:47,847 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] regionserver.HRegion(1001): Region open journal for 697517215fc3b7180eb3ba48942407dc: 2024-12-11T04:28:47,848 INFO [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc., pid=167, masterSystemTime=1733891327834 2024-12-11T04:28:47,849 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:47,849 INFO [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=167}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 
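The StoreOpener entries above show each of the three stores coming up with a CompactingMemStore whose in-memory compactor is ADAPTIVE, i.e. the table-level attribute set at create time takes effect for every column family. For reference, the same policy can also be requested per family through the column-family builder; this is a sketch of that alternative, not what this test does, and the setter name is the 2.x client API as best I recall it:

import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

class AdaptiveFamilySketch {
  // Per-family alternative to the table-level 'hbase.hregion.compacting.memstore.type' attribute.
  static ColumnFamilyDescriptor adaptiveFamily(String name) {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
        .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
        .build();
  }
}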
2024-12-11T04:28:47,849 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=166 updating hbase:meta row=697517215fc3b7180eb3ba48942407dc, regionState=OPEN, openSeqNum=2, regionLocation=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:47,851 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=167, resume processing ppid=166 2024-12-11T04:28:47,851 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, ppid=166, state=SUCCESS; OpenRegionProcedure 697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 in 168 msec 2024-12-11T04:28:47,852 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=165 2024-12-11T04:28:47,852 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=165, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=697517215fc3b7180eb3ba48942407dc, ASSIGN in 321 msec 2024-12-11T04:28:47,853 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=165, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T04:28:47,853 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733891327853"}]},"ts":"1733891327853"} 2024-12-11T04:28:47,854 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-11T04:28:47,856 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=165, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T04:28:47,857 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1460 sec 2024-12-11T04:28:48,618 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-11T04:28:48,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-11T04:28:48,815 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 165 completed 2024-12-11T04:28:48,816 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7fc332d8 to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5c9b5141 2024-12-11T04:28:48,823 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@103dfc6e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:28:48,824 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:28:48,825 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59658, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:28:48,826 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-11T04:28:48,827 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43708, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-11T04:28:48,828 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-12-11T04:28:48,829 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T04:28:48,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=168, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-11T04:28:48,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742407_1583 (size=999) 2024-12-11T04:28:49,239 DEBUG [PEWorker-5 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-11T04:28:49,239 INFO [PEWorker-5 {}] util.FSTableDescriptors(297): Updated tableinfo=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-11T04:28:49,240 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=169, ppid=168, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-11T04:28:49,242 INFO 
[PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=697517215fc3b7180eb3ba48942407dc, REOPEN/MOVE}] 2024-12-11T04:28:49,243 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=697517215fc3b7180eb3ba48942407dc, REOPEN/MOVE 2024-12-11T04:28:49,243 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=170 updating hbase:meta row=697517215fc3b7180eb3ba48942407dc, regionState=CLOSING, regionLocation=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:49,244 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T04:28:49,244 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=171, ppid=170, state=RUNNABLE; CloseRegionProcedure 697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267}] 2024-12-11T04:28:49,395 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:49,395 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=171}] handler.UnassignRegionHandler(124): Close 697517215fc3b7180eb3ba48942407dc 2024-12-11T04:28:49,396 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=171}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-11T04:28:49,396 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=171}] regionserver.HRegion(1681): Closing 697517215fc3b7180eb3ba48942407dc, disabling compactions & flushes 2024-12-11T04:28:49,396 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=171}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:49,396 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=171}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:49,396 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=171}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. after waiting 0 ms 2024-12-11T04:28:49,396 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=171}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:49,399 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=171}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-11T04:28:49,399 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=171}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 
2024-12-11T04:28:49,399 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=171}] regionserver.HRegion(1635): Region close journal for 697517215fc3b7180eb3ba48942407dc: 2024-12-11T04:28:49,399 WARN [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=171}] regionserver.HRegionServer(3786): Not adding moved region record: 697517215fc3b7180eb3ba48942407dc to self. 2024-12-11T04:28:49,401 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=171}] handler.UnassignRegionHandler(170): Closed 697517215fc3b7180eb3ba48942407dc 2024-12-11T04:28:49,401 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=170 updating hbase:meta row=697517215fc3b7180eb3ba48942407dc, regionState=CLOSED 2024-12-11T04:28:49,402 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=171, resume processing ppid=170 2024-12-11T04:28:49,403 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, ppid=170, state=SUCCESS; CloseRegionProcedure 697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 in 158 msec 2024-12-11T04:28:49,403 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=697517215fc3b7180eb3ba48942407dc, REOPEN/MOVE; state=CLOSED, location=5f466b3719ec,39071,1733891180267; forceNewPlan=false, retain=true 2024-12-11T04:28:49,553 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=170 updating hbase:meta row=697517215fc3b7180eb3ba48942407dc, regionState=OPENING, regionLocation=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:49,554 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=172, ppid=170, state=RUNNABLE; OpenRegionProcedure 697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267}] 2024-12-11T04:28:49,705 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:49,707 INFO [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 
2024-12-11T04:28:49,707 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7285): Opening region: {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} 2024-12-11T04:28:49,708 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 697517215fc3b7180eb3ba48942407dc 2024-12-11T04:28:49,708 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T04:28:49,708 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7327): checking encryption for 697517215fc3b7180eb3ba48942407dc 2024-12-11T04:28:49,708 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7330): checking classloading for 697517215fc3b7180eb3ba48942407dc 2024-12-11T04:28:49,709 INFO [StoreOpener-697517215fc3b7180eb3ba48942407dc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 697517215fc3b7180eb3ba48942407dc 2024-12-11T04:28:49,709 INFO [StoreOpener-697517215fc3b7180eb3ba48942407dc-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T04:28:49,709 INFO [StoreOpener-697517215fc3b7180eb3ba48942407dc-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 697517215fc3b7180eb3ba48942407dc columnFamilyName A 2024-12-11T04:28:49,710 DEBUG [StoreOpener-697517215fc3b7180eb3ba48942407dc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:49,711 INFO [StoreOpener-697517215fc3b7180eb3ba48942407dc-1 {}] regionserver.HStore(327): Store=697517215fc3b7180eb3ba48942407dc/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T04:28:49,711 INFO [StoreOpener-697517215fc3b7180eb3ba48942407dc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 697517215fc3b7180eb3ba48942407dc 2024-12-11T04:28:49,711 INFO [StoreOpener-697517215fc3b7180eb3ba48942407dc-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T04:28:49,711 INFO [StoreOpener-697517215fc3b7180eb3ba48942407dc-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 697517215fc3b7180eb3ba48942407dc columnFamilyName B 2024-12-11T04:28:49,711 DEBUG [StoreOpener-697517215fc3b7180eb3ba48942407dc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:49,712 INFO [StoreOpener-697517215fc3b7180eb3ba48942407dc-1 {}] regionserver.HStore(327): Store=697517215fc3b7180eb3ba48942407dc/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T04:28:49,712 INFO [StoreOpener-697517215fc3b7180eb3ba48942407dc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 697517215fc3b7180eb3ba48942407dc 2024-12-11T04:28:49,712 INFO [StoreOpener-697517215fc3b7180eb3ba48942407dc-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-11T04:28:49,712 INFO [StoreOpener-697517215fc3b7180eb3ba48942407dc-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 697517215fc3b7180eb3ba48942407dc columnFamilyName C 2024-12-11T04:28:49,712 DEBUG [StoreOpener-697517215fc3b7180eb3ba48942407dc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:49,713 INFO [StoreOpener-697517215fc3b7180eb3ba48942407dc-1 {}] regionserver.HStore(327): Store=697517215fc3b7180eb3ba48942407dc/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T04:28:49,713 INFO [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:49,713 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc 2024-12-11T04:28:49,714 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc 2024-12-11T04:28:49,715 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-11T04:28:49,716 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1085): writing seq id for 697517215fc3b7180eb3ba48942407dc 2024-12-11T04:28:49,717 INFO [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1102): Opened 697517215fc3b7180eb3ba48942407dc; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72008382, jitterRate=0.07300850749015808}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-11T04:28:49,717 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1001): Region open journal for 697517215fc3b7180eb3ba48942407dc: 2024-12-11T04:28:49,718 INFO [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc., pid=172, masterSystemTime=1733891329705 2024-12-11T04:28:49,719 DEBUG [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:49,719 INFO [RS_OPEN_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 
2024-12-11T04:28:49,720 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=170 updating hbase:meta row=697517215fc3b7180eb3ba48942407dc, regionState=OPEN, openSeqNum=5, regionLocation=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:49,721 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=172, resume processing ppid=170 2024-12-11T04:28:49,722 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=172, ppid=170, state=SUCCESS; OpenRegionProcedure 697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 in 167 msec 2024-12-11T04:28:49,723 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=170, resume processing ppid=169 2024-12-11T04:28:49,723 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, ppid=169, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=697517215fc3b7180eb3ba48942407dc, REOPEN/MOVE in 480 msec 2024-12-11T04:28:49,724 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=169, resume processing ppid=168 2024-12-11T04:28:49,724 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, ppid=168, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 483 msec 2024-12-11T04:28:49,725 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 895 msec 2024-12-11T04:28:49,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=168 2024-12-11T04:28:49,727 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x17327621 to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@11a52cdf 2024-12-11T04:28:49,730 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e047c09, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:28:49,731 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1584f18a to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2d7fe431 2024-12-11T04:28:49,734 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60d631a3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:28:49,735 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5b914bf4 to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@91d72db 2024-12-11T04:28:49,738 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58971172, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:28:49,739 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3f6a59e4 
to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5d836f78 2024-12-11T04:28:49,741 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d7fe93b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:28:49,742 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x150e08ed to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@53305d9b 2024-12-11T04:28:49,746 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11c440f7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:28:49,746 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6bb6288a to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@58460ef3 2024-12-11T04:28:49,748 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d9113f3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:28:49,749 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x06556601 to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6e8cd1ae 2024-12-11T04:28:49,754 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5bb75907, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:28:49,754 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x458a85fd to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4d832d43 2024-12-11T04:28:49,756 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c1d3a95, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:28:49,757 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x410bf0c8 to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@15b6349f 2024-12-11T04:28:49,759 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@503a7d2e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:28:49,760 DEBUG [Time-limited 
test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x67adb273 to 127.0.0.1:50078 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@439b60d5 2024-12-11T04:28:49,763 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@404bb685, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T04:28:49,766 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:28:49,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees 2024-12-11T04:28:49,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-11T04:28:49,767 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:28:49,767 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:28:49,768 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=174, ppid=173, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:28:49,774 DEBUG [hconnection-0x26cbe32e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:28:49,774 DEBUG [hconnection-0x34579969-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:28:49,774 DEBUG [hconnection-0x2108f5e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:28:49,775 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59662, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:28:49,775 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59678, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:28:49,775 DEBUG [hconnection-0xb204b4d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:28:49,775 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59682, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:28:49,776 DEBUG [hconnection-0x3c870473-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:28:49,776 DEBUG [hconnection-0x25905523-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=ClientService, sasl=false 2024-12-11T04:28:49,776 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59690, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:28:49,777 DEBUG [hconnection-0x69a2d954-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:28:49,777 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59714, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:28:49,777 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59706, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:28:49,778 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59720, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:28:49,780 DEBUG [hconnection-0x1831a06d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:28:49,780 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59728, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:28:49,784 DEBUG [hconnection-0x691b41ae-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:28:49,786 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59734, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:28:49,789 DEBUG [hconnection-0x6ebbc43d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T04:28:49,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 697517215fc3b7180eb3ba48942407dc 2024-12-11T04:28:49,790 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 697517215fc3b7180eb3ba48942407dc 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-11T04:28:49,791 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59744, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T04:28:49,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=A 2024-12-11T04:28:49,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:49,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=B 2024-12-11T04:28:49,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:49,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=C 2024-12-11T04:28:49,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:49,812 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding 
memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:49,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891389808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:49,812 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:49,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891389809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:49,812 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:49,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891389809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:49,813 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:49,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891389811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:49,813 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:49,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891389811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:49,818 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121128dbcbcfe125464b8c34b0eda5c9f6b4_697517215fc3b7180eb3ba48942407dc is 50, key is test_row_0/A:col10/1733891329790/Put/seqid=0 2024-12-11T04:28:49,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742408_1584 (size=12154) 2024-12-11T04:28:49,828 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:49,831 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121128dbcbcfe125464b8c34b0eda5c9f6b4_697517215fc3b7180eb3ba48942407dc to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121128dbcbcfe125464b8c34b0eda5c9f6b4_697517215fc3b7180eb3ba48942407dc 2024-12-11T04:28:49,832 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/4a44d0d5f9b84f12a7aa34e72697b2e9, store: [table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:28:49,832 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/4a44d0d5f9b84f12a7aa34e72697b2e9 is 175, key is test_row_0/A:col10/1733891329790/Put/seqid=0 2024-12-11T04:28:49,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742409_1585 (size=30955) 2024-12-11T04:28:49,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-11T04:28:49,915 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size 
limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:49,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891389913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:49,915 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:49,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891389913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:49,915 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:49,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891389913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:49,915 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:49,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891389914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:49,915 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:28:49,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891389914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:28:49,919 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267
2024-12-11T04:28:49,919 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174
2024-12-11T04:28:49,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.
2024-12-11T04:28:49,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. as already flushing
2024-12-11T04:28:49,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.
2024-12-11T04:28:49,919 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174
java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-11T04:28:49,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174
java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-11T04:28:49,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=174
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-11T04:28:50,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173
2024-12-11T04:28:50,071 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267
2024-12-11T04:28:50,072 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174
2024-12-11T04:28:50,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.
2024-12-11T04:28:50,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. as already flushing
2024-12-11T04:28:50,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.
2024-12-11T04:28:50,072 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174
java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-11T04:28:50,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174
java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-11T04:28:50,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=174
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-11T04:28:50,116 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:28:50,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891390116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:28:50,117 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:28:50,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891390116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:28:50,117 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:28:50,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891390116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:28:50,119 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:28:50,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891390117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:28:50,119 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:28:50,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891390117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:28:50,224 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267
2024-12-11T04:28:50,225 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174
2024-12-11T04:28:50,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.
2024-12-11T04:28:50,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. as already flushing
2024-12-11T04:28:50,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.
2024-12-11T04:28:50,225 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174
java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-11T04:28:50,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174
java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-11T04:28:50,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=174
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-11T04:28:50,238 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=16, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/4a44d0d5f9b84f12a7aa34e72697b2e9
2024-12-11T04:28:50,264 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/d23a7928aca14975bb605c2378ca48fc is 50, key is test_row_0/B:col10/1733891329790/Put/seqid=0
2024-12-11T04:28:50,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742410_1586 (size=12001)
2024-12-11T04:28:50,269 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/d23a7928aca14975bb605c2378ca48fc
2024-12-11T04:28:50,293 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/128f1651d564484fb0305bfd8676ad0d is 50, key is test_row_0/C:col10/1733891329790/Put/seqid=0
2024-12-11T04:28:50,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742411_1587 (size=12001)
2024-12-11T04:28:50,299 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/128f1651d564484fb0305bfd8676ad0d
2024-12-11T04:28:50,303 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/4a44d0d5f9b84f12a7aa34e72697b2e9 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/4a44d0d5f9b84f12a7aa34e72697b2e9
2024-12-11T04:28:50,307 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/4a44d0d5f9b84f12a7aa34e72697b2e9, entries=150, sequenceid=16, filesize=30.2 K
2024-12-11T04:28:50,307 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/d23a7928aca14975bb605c2378ca48fc as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/d23a7928aca14975bb605c2378ca48fc
2024-12-11T04:28:50,311 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/d23a7928aca14975bb605c2378ca48fc, entries=150, sequenceid=16, filesize=11.7 K
2024-12-11T04:28:50,312 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/128f1651d564484fb0305bfd8676ad0d as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/128f1651d564484fb0305bfd8676ad0d
2024-12-11T04:28:50,315 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/128f1651d564484fb0305bfd8676ad0d, entries=150, sequenceid=16, filesize=11.7 K
2024-12-11T04:28:50,316 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 697517215fc3b7180eb3ba48942407dc in 525ms, sequenceid=16, compaction requested=false
2024-12-11T04:28:50,316 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees'
2024-12-11T04:28:50,316 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 697517215fc3b7180eb3ba48942407dc:
2024-12-11T04:28:50,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173
2024-12-11T04:28:50,377 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267
2024-12-11T04:28:50,378 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174
2024-12-11T04:28:50,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.
2024-12-11T04:28:50,378 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2837): Flushing 697517215fc3b7180eb3ba48942407dc 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB
2024-12-11T04:28:50,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=A
2024-12-11T04:28:50,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-11T04:28:50,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=B
2024-12-11T04:28:50,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-11T04:28:50,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=C
2024-12-11T04:28:50,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-11T04:28:50,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412114b076ee541114d99af5538351f3fb5c3_697517215fc3b7180eb3ba48942407dc is 50, key is test_row_0/A:col10/1733891329809/Put/seqid=0
2024-12-11T04:28:50,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742412_1588 (size=12154)
2024-12-11T04:28:50,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 697517215fc3b7180eb3ba48942407dc
2024-12-11T04:28:50,420 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. as already flushing
2024-12-11T04:28:50,473 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:28:50,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891390471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:28:50,473 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:28:50,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891390471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:28:50,474 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:28:50,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891390471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:28:50,474 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:28:50,474 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:28:50,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891390472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:28:50,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891390472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:28:50,575 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:28:50,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891390574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:28:50,575 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:28:50,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891390574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:28:50,576 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:28:50,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891390575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:28:50,576 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:28:50,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891390575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:28:50,576 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:28:50,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891390575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:28:50,778 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:50,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891390776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:50,778 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:50,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891390777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:50,778 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:50,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891390777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:50,778 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:50,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891390777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:50,779 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:50,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891390778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:50,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:50,793 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412114b076ee541114d99af5538351f3fb5c3_697517215fc3b7180eb3ba48942407dc to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412114b076ee541114d99af5538351f3fb5c3_697517215fc3b7180eb3ba48942407dc 2024-12-11T04:28:50,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/9222775907c44102a265724456338d39, store: [table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:28:50,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/9222775907c44102a265724456338d39 is 175, key is test_row_0/A:col10/1733891329809/Put/seqid=0 2024-12-11T04:28:50,799 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742413_1589 (size=30955) 2024-12-11T04:28:50,800 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/9222775907c44102a265724456338d39 2024-12-11T04:28:50,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/a0565cc8a1b942f79346ee2a41a0af50 is 50, key is test_row_0/B:col10/1733891329809/Put/seqid=0 2024-12-11T04:28:50,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742414_1590 (size=12001) 2024-12-11T04:28:50,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-11T04:28:51,081 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:51,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891391079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:51,081 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:28:51,081 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:28:51,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891391079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:28:51,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891391080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:28:51,082 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:28:51,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891391080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:28:51,082 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:28:51,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891391081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:28:51,211 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/a0565cc8a1b942f79346ee2a41a0af50
2024-12-11T04:28:51,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/61be4930774948debfd6e362db5c5bca is 50, key is test_row_0/C:col10/1733891329809/Put/seqid=0
2024-12-11T04:28:51,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742415_1591 (size=12001)
2024-12-11T04:28:51,223 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/61be4930774948debfd6e362db5c5bca
2024-12-11T04:28:51,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/9222775907c44102a265724456338d39 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/9222775907c44102a265724456338d39
2024-12-11T04:28:51,235 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/9222775907c44102a265724456338d39, entries=150, sequenceid=41, filesize=30.2 K
2024-12-11T04:28:51,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/a0565cc8a1b942f79346ee2a41a0af50 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/a0565cc8a1b942f79346ee2a41a0af50
2024-12-11T04:28:51,239 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/a0565cc8a1b942f79346ee2a41a0af50, entries=150, sequenceid=41, filesize=11.7 K
2024-12-11T04:28:51,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/61be4930774948debfd6e362db5c5bca as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/61be4930774948debfd6e362db5c5bca
2024-12-11T04:28:51,245 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/61be4930774948debfd6e362db5c5bca, entries=150, sequenceid=41, filesize=11.7 K
2024-12-11T04:28:51,246 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 697517215fc3b7180eb3ba48942407dc in 868ms, sequenceid=41, compaction requested=false
2024-12-11T04:28:51,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2538): Flush status journal for 697517215fc3b7180eb3ba48942407dc:
2024-12-11T04:28:51,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.
2024-12-11T04:28:51,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=174
2024-12-11T04:28:51,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=174
2024-12-11T04:28:51,248 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=174, resume processing ppid=173
2024-12-11T04:28:51,249 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=174, ppid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4790 sec
2024-12-11T04:28:51,250 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees in 1.4830 sec
2024-12-11T04:28:51,437 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties
2024-12-11T04:28:51,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 697517215fc3b7180eb3ba48942407dc
2024-12-11T04:28:51,585 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 697517215fc3b7180eb3ba48942407dc 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB
2024-12-11T04:28:51,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=A
2024-12-11T04:28:51,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-11T04:28:51,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=B
2024-12-11T04:28:51,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-11T04:28:51,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=C
2024-12-11T04:28:51,587 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-11T04:28:51,592 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121196028cd218094eb7956ef2364358a650_697517215fc3b7180eb3ba48942407dc is 50, key is test_row_0/A:col10/1733891331585/Put/seqid=0
2024-12-11T04:28:51,603 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:28:51,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891391599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:28:51,603 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:28:51,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891391600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:28:51,603 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:28:51,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891391601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:28:51,604 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:28:51,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891391602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:28:51,604 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:28:51,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891391603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:28:51,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742416_1592 (size=12154)
2024-12-11T04:28:51,705 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:28:51,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891391704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:28:51,705 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:28:51,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891391704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:28:51,707 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:28:51,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891391706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:28:51,708 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:28:51,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891391706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:28:51,709 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:28:51,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891391708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:28:51,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173
2024-12-11T04:28:51,871 INFO [Thread-2578 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 173 completed
2024-12-11T04:28:51,872 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-11T04:28:51,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=175, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees
2024-12-11T04:28:51,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175
2024-12-11T04:28:51,874 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=175, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-11T04:28:51,874 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=175, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-11T04:28:51,874 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=176, ppid=175, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-11T04:28:51,907 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:28:51,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891391906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:28:51,909 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:28:51,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891391907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:28:51,909 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:28:51,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891391907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:28:51,912 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:28:51,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891391910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:28:51,912 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:28:51,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891391910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:28:51,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175
2024-12-11T04:28:52,010 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:52,013 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121196028cd218094eb7956ef2364358a650_697517215fc3b7180eb3ba48942407dc to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121196028cd218094eb7956ef2364358a650_697517215fc3b7180eb3ba48942407dc
2024-12-11T04:28:52,014 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/da62eb92b9db4a29b921aedfbdd3b4b1, store: [table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc]
2024-12-11T04:28:52,015 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/da62eb92b9db4a29b921aedfbdd3b4b1 is 175, key is test_row_0/A:col10/1733891331585/Put/seqid=0
2024-12-11T04:28:52,026 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267
2024-12-11T04:28:52,026 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176
2024-12-11T04:28:52,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.
2024-12-11T04:28:52,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.
as already flushing 2024-12-11T04:28:52,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:52,027 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:52,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:52,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:52,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742417_1593 (size=30955) 2024-12-11T04:28:52,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-11T04:28:52,178 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:52,179 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-11T04:28:52,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:52,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. as already flushing 2024-12-11T04:28:52,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 
2024-12-11T04:28:52,179 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:52,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:52,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:52,211 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:52,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891392209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:52,211 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:52,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891392210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:52,212 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:52,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891392211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:52,217 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:52,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891392214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:52,217 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:52,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891392215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:52,331 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:52,331 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-11T04:28:52,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:52,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. as already flushing 2024-12-11T04:28:52,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:52,332 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:52,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:52,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:52,438 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=55, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/da62eb92b9db4a29b921aedfbdd3b4b1 2024-12-11T04:28:52,445 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/c957ca52cdfb4d31b560ab522c4f09cd is 50, key is test_row_0/B:col10/1733891331585/Put/seqid=0 2024-12-11T04:28:52,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742418_1594 (size=12001) 2024-12-11T04:28:52,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-11T04:28:52,486 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:52,487 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-11T04:28:52,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:52,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 
as already flushing 2024-12-11T04:28:52,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:52,487 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:52,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:52,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:52,639 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:52,640 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-11T04:28:52,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:52,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. as already flushing 2024-12-11T04:28:52,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:52,640 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:52,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:52,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:52,714 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:52,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891392714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:52,716 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:52,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891392715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:52,718 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:52,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891392716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:52,719 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:52,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891392718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:52,722 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:52,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891392720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:52,792 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:52,793 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-11T04:28:52,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:52,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. as already flushing 2024-12-11T04:28:52,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:52,793 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:52,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:52,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:52,851 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/c957ca52cdfb4d31b560ab522c4f09cd 2024-12-11T04:28:52,859 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/a85df34644a94a748eaf9e2ffbe82f3b is 50, key is test_row_0/C:col10/1733891331585/Put/seqid=0 2024-12-11T04:28:52,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742419_1595 (size=12001) 2024-12-11T04:28:52,864 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/a85df34644a94a748eaf9e2ffbe82f3b 2024-12-11T04:28:52,869 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/da62eb92b9db4a29b921aedfbdd3b4b1 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/da62eb92b9db4a29b921aedfbdd3b4b1 2024-12-11T04:28:52,872 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/da62eb92b9db4a29b921aedfbdd3b4b1, entries=150, sequenceid=55, filesize=30.2 K 2024-12-11T04:28:52,873 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/c957ca52cdfb4d31b560ab522c4f09cd as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/c957ca52cdfb4d31b560ab522c4f09cd 2024-12-11T04:28:52,876 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/c957ca52cdfb4d31b560ab522c4f09cd, entries=150, sequenceid=55, filesize=11.7 K 2024-12-11T04:28:52,877 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/a85df34644a94a748eaf9e2ffbe82f3b as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/a85df34644a94a748eaf9e2ffbe82f3b 2024-12-11T04:28:52,880 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/a85df34644a94a748eaf9e2ffbe82f3b, entries=150, sequenceid=55, filesize=11.7 K 2024-12-11T04:28:52,881 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 697517215fc3b7180eb3ba48942407dc in 1296ms, sequenceid=55, compaction requested=true 2024-12-11T04:28:52,881 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 697517215fc3b7180eb3ba48942407dc: 2024-12-11T04:28:52,881 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 697517215fc3b7180eb3ba48942407dc:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:28:52,881 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:52,881 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 697517215fc3b7180eb3ba48942407dc:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:28:52,881 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:28:52,881 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:52,881 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:28:52,881 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store 697517215fc3b7180eb3ba48942407dc:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:28:52,881 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:28:52,882 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92865 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:28:52,882 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:28:52,882 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): 697517215fc3b7180eb3ba48942407dc/A is initiating minor compaction (all files) 2024-12-11T04:28:52,882 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): 697517215fc3b7180eb3ba48942407dc/B is initiating minor compaction (all files) 2024-12-11T04:28:52,882 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 697517215fc3b7180eb3ba48942407dc/A in TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:52,882 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 697517215fc3b7180eb3ba48942407dc/B in TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:52,883 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/4a44d0d5f9b84f12a7aa34e72697b2e9, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/9222775907c44102a265724456338d39, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/da62eb92b9db4a29b921aedfbdd3b4b1] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp, totalSize=90.7 K 2024-12-11T04:28:52,883 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/d23a7928aca14975bb605c2378ca48fc, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/a0565cc8a1b942f79346ee2a41a0af50, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/c957ca52cdfb4d31b560ab522c4f09cd] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp, totalSize=35.2 K 2024-12-11T04:28:52,883 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] 
mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:52,883 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. files: [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/4a44d0d5f9b84f12a7aa34e72697b2e9, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/9222775907c44102a265724456338d39, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/da62eb92b9db4a29b921aedfbdd3b4b1] 2024-12-11T04:28:52,883 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4a44d0d5f9b84f12a7aa34e72697b2e9, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733891329784 2024-12-11T04:28:52,883 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting d23a7928aca14975bb605c2378ca48fc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733891329784 2024-12-11T04:28:52,883 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting a0565cc8a1b942f79346ee2a41a0af50, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733891329807 2024-12-11T04:28:52,883 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9222775907c44102a265724456338d39, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733891329807 2024-12-11T04:28:52,884 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting c957ca52cdfb4d31b560ab522c4f09cd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733891330471 2024-12-11T04:28:52,884 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting da62eb92b9db4a29b921aedfbdd3b4b1, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733891330471 2024-12-11T04:28:52,895 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 697517215fc3b7180eb3ba48942407dc#B#compaction#510 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:52,897 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/0eeef0cd0b7b4a74ab2a178853c9779c is 50, key is test_row_0/B:col10/1733891331585/Put/seqid=0 2024-12-11T04:28:52,898 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:28:52,900 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121137b55732701a481aa3e69cd1aff6bfe7_697517215fc3b7180eb3ba48942407dc store=[table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:28:52,902 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121137b55732701a481aa3e69cd1aff6bfe7_697517215fc3b7180eb3ba48942407dc, store=[table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:28:52,902 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121137b55732701a481aa3e69cd1aff6bfe7_697517215fc3b7180eb3ba48942407dc because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:28:52,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742421_1597 (size=4469) 2024-12-11T04:28:52,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742420_1596 (size=12104) 2024-12-11T04:28:52,907 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 697517215fc3b7180eb3ba48942407dc#A#compaction#511 average throughput is 2.71 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:52,907 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/57a60ec820b64a44bc8bcc76da02adcd is 175, key is test_row_0/A:col10/1733891331585/Put/seqid=0 2024-12-11T04:28:52,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742422_1598 (size=31058) 2024-12-11T04:28:52,945 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:52,945 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-11T04:28:52,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:52,946 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2837): Flushing 697517215fc3b7180eb3ba48942407dc 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-11T04:28:52,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=A 2024-12-11T04:28:52,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:52,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=B 2024-12-11T04:28:52,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:52,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=C 2024-12-11T04:28:52,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:52,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211e60a380bcddb4f2f8c4fd64a5543d8f6_697517215fc3b7180eb3ba48942407dc is 50, key is test_row_0/A:col10/1733891331599/Put/seqid=0 2024-12-11T04:28:52,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742423_1599 (size=12154) 2024-12-11T04:28:52,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:52,961 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211e60a380bcddb4f2f8c4fd64a5543d8f6_697517215fc3b7180eb3ba48942407dc to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211e60a380bcddb4f2f8c4fd64a5543d8f6_697517215fc3b7180eb3ba48942407dc 2024-12-11T04:28:52,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/5becf625da754edcb0ce57ad03d96984, store: [table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:28:52,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/5becf625da754edcb0ce57ad03d96984 is 175, key is test_row_0/A:col10/1733891331599/Put/seqid=0 2024-12-11T04:28:52,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742424_1600 (size=30955) 2024-12-11T04:28:52,968 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=77, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/5becf625da754edcb0ce57ad03d96984 2024-12-11T04:28:52,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-11T04:28:52,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/7ed18ab4932643db9313fca209206c96 is 50, key is test_row_0/B:col10/1733891331599/Put/seqid=0 2024-12-11T04:28:52,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742425_1601 (size=12001) 2024-12-11T04:28:52,982 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/7ed18ab4932643db9313fca209206c96 2024-12-11T04:28:52,989 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/bca4373974e141e685d6058a11a072d9 is 50, key is test_row_0/C:col10/1733891331599/Put/seqid=0 2024-12-11T04:28:52,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742426_1602 (size=12001) 2024-12-11T04:28:52,994 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/bca4373974e141e685d6058a11a072d9 2024-12-11T04:28:52,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/5becf625da754edcb0ce57ad03d96984 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/5becf625da754edcb0ce57ad03d96984 2024-12-11T04:28:53,001 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/5becf625da754edcb0ce57ad03d96984, entries=150, sequenceid=77, filesize=30.2 K 2024-12-11T04:28:53,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/7ed18ab4932643db9313fca209206c96 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/7ed18ab4932643db9313fca209206c96 2024-12-11T04:28:53,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,005 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/7ed18ab4932643db9313fca209206c96, entries=150, sequenceid=77, filesize=11.7 K 2024-12-11T04:28:53,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/bca4373974e141e685d6058a11a072d9 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/bca4373974e141e685d6058a11a072d9 2024-12-11T04:28:53,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
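The repeated "Committing .../.tmp/<family>/<file> as .../<family>/<file>" lines above show how a flush result is published in two phases: the HFile is fully written under the region's .tmp directory first, and only then moved into the column-family directory, so concurrent readers never observe a half-written file. The sketch below is a minimal illustration of that write-then-rename pattern against HDFS; the region path and payload are made up for the example, and this is only the general pattern, not the HRegionFileSystem commit code itself.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenCommit {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical namenode address; the log in this run uses hdfs://localhost:43317.
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:43317"), conf);

        Path tmpFile = new Path("/user/jenkins/example-region/.tmp/B/flush-0001");
        Path committed = new Path("/user/jenkins/example-region/B/flush-0001");

        // Phase 1: write the whole file under .tmp, where scanners never look.
        try (FSDataOutputStream out = fs.create(tmpFile, /*overwrite=*/ true)) {
            out.writeBytes("illustrative payload, not a real HFile\n");
        }

        // Phase 2: a single rename publishes the finished file into the store directory.
        fs.mkdirs(committed.getParent());
        if (!fs.rename(tmpFile, committed)) {
            throw new java.io.IOException("commit rename failed for " + tmpFile);
        }
        System.out.println("committed " + committed);
    }
}

The single rename is what makes the commit effectively atomic from a reader's point of view; if the region server died mid-flush, only a leftover .tmp file would remain and could be discarded.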
2024-12-11T04:28:53,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,010 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/bca4373974e141e685d6058a11a072d9, entries=150, sequenceid=77, filesize=11.7 K 2024-12-11T04:28:53,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,011 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=0 B/0 for 697517215fc3b7180eb3ba48942407dc in 65ms, sequenceid=77, compaction requested=true 2024-12-11T04:28:53,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2538): Flush status journal for 697517215fc3b7180eb3ba48942407dc: 2024-12-11T04:28:53,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 
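The flush that completes here ("Finished flush of dataSize ~127.47 KB ... currentSize=0 B/0 ... sequenceid=77") is what drains the memstore that was rejecting mutations with RegionTooBusyException ("Over memstore limit=512.0 K") at the top of this section. The stock HBase client already retries that exception on its own, governed by hbase.client.retries.number and hbase.client.pause, but the sketch below shows roughly what an explicit retry-with-backoff around Table.put would look like; the table name matches the test, while the row, values, attempt count, and backoff figures are assumptions made purely for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPut {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long pauseMs = 100;                         // assumed starting backoff
            for (int attempt = 1; attempt <= 10; attempt++) {
                try {
                    table.put(put);                     // RegionTooBusyException is an IOException
                    return;                             // write accepted
                } catch (RegionTooBusyException e) {
                    // Region is over its memstore blocking limit; wait for a flush to catch up.
                    Thread.sleep(pauseMs);
                    pauseMs = Math.min(pauseMs * 2, 10_000);
                }
            }
            throw new java.io.IOException("region stayed too busy after 10 attempts");
        }
    }
}

Doubling the pause keeps a blocked writer from hammering a region that is waiting on a flush; in this log the server-side flush clears the backlog on its own within the same second-or-so window.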
2024-12-11T04:28:53,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=176 2024-12-11T04:28:53,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=176 2024-12-11T04:28:53,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,013 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=176, resume processing ppid=175 
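The "Exploring compaction algorithm has selected 3 files of size 92865 ..." lines earlier in this section reflect the usual ratio-based candidate check: a set of store files is acceptable when no file is larger than the combined size of the other candidates multiplied by hbase.hstore.compaction.ratio. The stand-alone sketch below runs that check over the three A-family files from this cycle (92,865 bytes total, taken as 30,955 bytes each for an even split of the logged total); the 1.2 ratio is the commonly cited default and is assumed here, and the code is a simplification rather than the actual ExploringCompactionPolicy implementation.

import java.util.List;

public class RatioCheck {
    // Roughly the "files in ratio" rule: each candidate must be no bigger than
    // ratio * (sum of the other candidates).
    static boolean filesInRatio(List<Long> sizes, double ratio) {
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Sizes (bytes) standing in for the three A-family HFiles selected above.
        List<Long> aFiles = List.of(30955L, 30955L, 30955L);
        double ratio = 1.2;   // assumed default for hbase.hstore.compaction.ratio
        System.out.println("A files in ratio: " + filesInRatio(aFiles, ratio));
    }
}

With every file at roughly a third of the total, each one is well under 1.2 times the size of the other two, which is consistent with the policy accepting all three after considering a single permutation, as logged above.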
2024-12-11T04:28:53,013 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=176, ppid=175, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1380 sec 2024-12-11T04:28:53,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,014 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=175, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees in 1.1410 sec 2024-12-11T04:28:53,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
[identical DEBUG entry repeated by RpcServer.default.FPBQ.Fifo.handler=0,1,2 (queue=0, port=39071) from 2024-12-11T04:28:53,023 through 2024-12-11T04:28:53,075] 
2024-12-11T04:28:53,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:53,311 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/0eeef0cd0b7b4a74ab2a178853c9779c as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/0eeef0cd0b7b4a74ab2a178853c9779c
2024-12-11T04:28:53,319 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/57a60ec820b64a44bc8bcc76da02adcd as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/57a60ec820b64a44bc8bcc76da02adcd
2024-12-11T04:28:53,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,324 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 697517215fc3b7180eb3ba48942407dc/A of 697517215fc3b7180eb3ba48942407dc into 57a60ec820b64a44bc8bcc76da02adcd(size=30.3 K), total size for store is 60.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
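[Editor's note] The HRegionFileSystem "Committing ... .tmp/A/... as .../A/..." entries above record the write-to-temp-then-rename pattern: flush and compaction output is first written under the region's .tmp directory and only becomes visible to readers once it is moved into the column-family directory. The sketch below illustrates that pattern on a local filesystem with java.nio.file and made-up paths; the real code runs against HDFS through Hadoop's FileSystem API, so treat this as an analogue of the idea, not HBase's implementation.

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardCopyOption;

    // Sketch of the .tmp -> store-directory commit pattern recorded above.
    // Local-filesystem analogue only; all paths and file names are made up.
    public class TmpThenCommitSketch {
        public static void main(String[] args) throws IOException {
            Path storeDir = Files.createTempDirectory("family-A");          // stands in for .../A
            Path tmpDir = Files.createDirectories(storeDir.resolve(".tmp"));

            // 1. Write the new file where readers cannot see it yet.
            Path tmpFile = tmpDir.resolve("compaction-output-example");
            Files.writeString(tmpFile, "merged store file contents");

            // 2. Commit: an atomic move into the visible family directory.
            Path committed = storeDir.resolve(tmpFile.getFileName());
            System.out.println("Committing " + tmpFile + " as " + committed);
            Files.move(tmpFile, committed, StandardCopyOption.ATOMIC_MOVE);
        }
    }

Because the move happens within one filesystem, readers either see the old set of store files or the new one, never a half-written file.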
2024-12-11T04:28:53,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,324 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 697517215fc3b7180eb3ba48942407dc: 2024-12-11T04:28:53,324 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc., storeName=697517215fc3b7180eb3ba48942407dc/A, priority=13, startTime=1733891332881; duration=0sec 2024-12-11T04:28:53,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,324 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:28:53,324 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 697517215fc3b7180eb3ba48942407dc:A 2024-12-11T04:28:53,324 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T04:28:53,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,325 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T04:28:53,325 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): 697517215fc3b7180eb3ba48942407dc/C is initiating minor compaction (all files) 2024-12-11T04:28:53,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,325 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 697517215fc3b7180eb3ba48942407dc/C in TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 
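[Editor's note] The SortedCompactionPolicy / ExploringCompactionPolicy entries above report selecting 4 eligible files of size 48004 after considering 3 permutations "in ratio". A much-simplified sketch of that style of selection follows: it scans contiguous windows of file sizes, keeps only windows in which every file is at most ratio times the sum of the others, and prefers the window with the most files, breaking ties by smaller total size. The class name, method names, thresholds, and example sizes are all invented for illustration; this shows the general idea of ratio-based selection, not the exact HBase algorithm.

    import java.util.ArrayList;
    import java.util.List;

    // Simplified sketch of ratio-based compaction selection over contiguous
    // windows of store-file sizes. Illustrative only; not the HBase policy.
    public class ExploringSelectionSketch {

        /** True if every file is <= ratio * (sum of the other files in the window). */
        static boolean inRatio(List<Long> window, double ratio) {
            long total = window.stream().mapToLong(Long::longValue).sum();
            for (long size : window) {
                if (size > ratio * (total - size)) {
                    return false;
                }
            }
            return true;
        }

        /** Pick the qualifying window with the most files; smaller total size breaks ties. */
        static List<Long> select(List<Long> sizes, int minFiles, int maxFiles, double ratio) {
            List<Long> best = List.of();
            long bestTotal = Long.MAX_VALUE;
            for (int start = 0; start < sizes.size(); start++) {
                for (int end = start + minFiles; end <= Math.min(sizes.size(), start + maxFiles); end++) {
                    List<Long> window = new ArrayList<>(sizes.subList(start, end));
                    if (!inRatio(window, ratio)) {
                        continue;
                    }
                    long total = window.stream().mapToLong(Long::longValue).sum();
                    if (window.size() > best.size()
                            || (window.size() == best.size() && total < bestTotal)) {
                        best = window;
                        bestTotal = total;
                    }
                }
            }
            return best;
        }

        public static void main(String[] args) {
            // Four roughly equal files, echoing the log's "4 files of size 48004".
            List<Long> sizes = List.of(12_001L, 12_001L, 12_001L, 12_001L);
            System.out.println("selected: " + select(sizes, 2, 10, 1.2));
        }
    }

With four similar-sized files, every window of two or more files passes the ratio test, so the largest window (all four) wins, which matches the "(all files)" minor compaction the log reports for store C.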
2024-12-11T04:28:53,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,325 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/128f1651d564484fb0305bfd8676ad0d, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/61be4930774948debfd6e362db5c5bca, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/a85df34644a94a748eaf9e2ffbe82f3b, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/bca4373974e141e685d6058a11a072d9] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp, totalSize=46.9 K 2024-12-11T04:28:53,326 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 128f1651d564484fb0305bfd8676ad0d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733891329784 2024-12-11T04:28:53,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,326 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 61be4930774948debfd6e362db5c5bca, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733891329807 2024-12-11T04:28:53,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,327 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting a85df34644a94a748eaf9e2ffbe82f3b, keycount=150, bloomtype=ROW, size=11.7 K, 
encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733891330471 2024-12-11T04:28:53,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,327 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting bca4373974e141e685d6058a11a072d9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1733891331599 2024-12-11T04:28:53,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,333 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 697517215fc3b7180eb3ba48942407dc/B of 697517215fc3b7180eb3ba48942407dc into 0eeef0cd0b7b4a74ab2a178853c9779c(size=11.8 K), total size for store is 23.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
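[Editor's note] The Compactor entries above list four sorted store files for family C (keycount, bloomtype, size, seqNum, earliestPutTs), and the "Completed compaction ... into ..." entries show each compaction producing a single merged output file. The core of that rewrite is a k-way merge of already-sorted inputs; the sketch below shows such a merge with a priority queue over plain string keys. All names are illustrative stand-ins and the inputs are toy data; the real Compactor merges HFile scanners and applies cell-level version and delete handling that this sketch ignores.

    import java.util.Iterator;
    import java.util.List;
    import java.util.PriorityQueue;

    // Simplified sketch of the merge step of a compaction: several sorted inputs
    // (standing in for the candidate store files listed above) are merged into a
    // single sorted output using a priority queue. Not the real HBase Compactor.
    public class MergeSortedFilesSketch {

        /** One sorted input plus the key it is currently positioned on. */
        private record Cursor(Iterator<String> it, String head) { }

        static void merge(List<List<String>> sortedInputs) {
            PriorityQueue<Cursor> heap =
                    new PriorityQueue<>((a, b) -> a.head().compareTo(b.head()));
            for (List<String> input : sortedInputs) {
                Iterator<String> it = input.iterator();
                if (it.hasNext()) {
                    heap.add(new Cursor(it, it.next()));
                }
            }
            while (!heap.isEmpty()) {
                Cursor c = heap.poll();
                System.out.println(c.head());                    // emit smallest current key
                if (c.it().hasNext()) {
                    heap.add(new Cursor(c.it(), c.it().next())); // advance that input
                }
            }
        }

        public static void main(String[] args) {
            merge(List.of(
                    List.of("row0", "row3", "row6"),
                    List.of("row1", "row4", "row7"),
                    List.of("row2", "row5", "row8")));
        }
    }

The merged stream is written to a single new file, which is why the store-file count drops (here from 3 files back to 1 per completed compaction) while the total key range is preserved.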
2024-12-11T04:28:53,333 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 697517215fc3b7180eb3ba48942407dc: 2024-12-11T04:28:53,333 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc., storeName=697517215fc3b7180eb3ba48942407dc/B, priority=13, startTime=1733891332881; duration=0sec 2024-12-11T04:28:53,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,333 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:53,333 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 697517215fc3b7180eb3ba48942407dc:B 2024-12-11T04:28:53,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,358 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 697517215fc3b7180eb3ba48942407dc#C#compaction#515 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:53,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,358 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/4a7b63c883c34ebfa9db65af603384b2 is 50, key is test_row_0/C:col10/1733891331599/Put/seqid=0 2024-12-11T04:28:53,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:53,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:53,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:53,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742427_1603 (size=12139)
2024-12-11T04:28:53,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:53,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:53,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:53,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:53,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:53,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:53,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:53,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:53,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:53,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:53,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:53,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:53,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:53,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:53,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:53,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:53,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:53,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:53,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:53,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:53,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:53,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:53,367 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/4a7b63c883c34ebfa9db65af603384b2 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/4a7b63c883c34ebfa9db65af603384b2
2024-12-11T04:28:53,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:53,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:53,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:53,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:53,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:53,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:53,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:53,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:53,372 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 697517215fc3b7180eb3ba48942407dc/C of 697517215fc3b7180eb3ba48942407dc into 4a7b63c883c34ebfa9db65af603384b2(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-11T04:28:53,372 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 697517215fc3b7180eb3ba48942407dc:
2024-12-11T04:28:53,372 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc., storeName=697517215fc3b7180eb3ba48942407dc/C, priority=12, startTime=1733891332881; duration=0sec
2024-12-11T04:28:53,372 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-11T04:28:53,372 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 697517215fc3b7180eb3ba48942407dc:C
2024-12-11T04:28:53,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:53,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:53,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:53,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:53,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:53,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:53,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:53,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:53,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:53,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:53,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[the same DEBUG message — storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker — is logged repeatedly by RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 on port 39071 between 2024-12-11T04:28:53,502 and 2024-12-11T04:28:53,565]
2024-12-11T04:28:53,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... same DEBUG message repeated across RpcServer.default.FPBQ.Fifo handlers 0-2 (queue=0, port=39071) from 2024-12-11T04:28:53,603 to 2024-12-11T04:28:53,668 ...]
2024-12-11T04:28:53,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:53,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[dozens of further identical DEBUG entries, 2024-12-11T04:28:53,759 through 04:28:53,779, from RpcServer.default.FPBQ.Fifo handlers 1 and 2 on port 39071, each instantiating org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker]
2024-12-11T04:28:53,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 697517215fc3b7180eb3ba48942407dc
2024-12-11T04:28:53,779 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 697517215fc3b7180eb3ba48942407dc 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB
[a few further identical StoreFileTrackerFactory DEBUG entries, 2024-12-11T04:28:53,779 through 04:28:53,780, from handlers 1 and 2]
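The repeated StoreFileTrackerFactory entries above come from each store operation resolving its tracker implementation through configuration; with nothing set, the factory falls back to DefaultStoreFileTracker. A minimal sketch of how the implementation is selected, assuming the hbase.store.file-tracker.impl property and the DEFAULT/FILE value names shipped in recent HBase 2.x releases (verify against your version; this is not taken from the test code):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class StoreFileTrackerConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // With no explicit setting, StoreFileTrackerFactory resolves to
        // DefaultStoreFileTracker, which is exactly what the DEBUG lines above show.
        // "FILE" would switch to the file-based tracker (value names assumed from
        // recent HBase releases).
        conf.set("hbase.store.file-tracker.impl", "DEFAULT");
        System.out.println("store file tracker = " + conf.get("hbase.store.file-tracker.impl"));
      }
    }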
2024-12-11T04:28:53,781 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=A
2024-12-11T04:28:53,781 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-11T04:28:53,781 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=B
2024-12-11T04:28:53,781 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-11T04:28:53,781 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=C
2024-12-11T04:28:53,781 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
[interleaved with the above: roughly two dozen further identical DEBUG entries, 2024-12-11T04:28:53,781 through 04:28:53,791, from RpcServer.default.FPBQ.Fifo handlers 1 and 2 on port 39071, each instantiating org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker]
2024-12-11T04:28:53,795 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412116aa9c30b581c4106b0b7b5e8f63db40a_697517215fc3b7180eb3ba48942407dc is 50, key is test_row_0/A:col10/1733891333779/Put/seqid=0 2024-12-11T04:28:53,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742429_1605 (size=26798) 2024-12-11T04:28:53,818 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:53,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891393815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:53,819 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:53,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891393815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:53,820 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:53,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891393817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:53,820 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:53,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891393817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:53,820 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:53,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891393817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:53,921 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:53,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891393920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:53,922 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:53,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891393920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:53,922 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:53,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891393921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:53,923 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:53,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891393921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:53,923 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:28:53,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891393921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:28:53,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175
2024-12-11T04:28:53,977 INFO [Thread-2578 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 175 completed
2024-12-11T04:28:53,978 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-11T04:28:53,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=177, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees
2024-12-11T04:28:53,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177
2024-12-11T04:28:53,980 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=177, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-11T04:28:53,980 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=177, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-11T04:28:53,981 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-11T04:28:54,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177
2024-12-11T04:28:54,124 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:54,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891394122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:54,124 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:54,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891394122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:54,125 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:54,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891394123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:54,125 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:54,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891394124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:54,126 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:54,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891394125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:54,132 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:54,132 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-12-11T04:28:54,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 
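The repeated RegionTooBusyException warnings above are HBase's write-blocking behaviour: once a region's memstore grows past its blocking limit (reported here as 512.0 K, a test-tuned value), new mutations are rejected until a flush brings it back down. The blocking limit is the memstore flush size multiplied by the block multiplier. A minimal sketch of the two settings involved, assuming the standard property names hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier; the small values below only illustrate how a 512 KB limit could arise and are not read from the test configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Flush a region's memstore once it reaches 128 KB ...
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        // ... and start rejecting writes (RegionTooBusyException) at 4x that,
        // i.e. the 512 KB "Over memstore limit" seen in the log.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("blocking limit = " + blockingLimit + " bytes"); // 524288
      }
    }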
2024-12-11T04:28:54,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. as already flushing 2024-12-11T04:28:54,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:54,133 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:54,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:28:54,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
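The pid=177/178 entries show the master-side view of the flush request: the master stores a FlushTableProcedure, dispatches a FlushRegionProcedure to the region server, and because the region is already flushing, the remote call fails and the procedure is retried. On the client side, the "Client=jenkins//... flush TestAcidGuarantees" request corresponds roughly to an Admin flush call; a sketch using the standard client API (assumed equivalent, not the test's own code):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushRequestSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Requests a flush of every region of the table; the master runs it as a
          // FlushTableProcedure (pid=177 above) with one FlushRegionProcedure per
          // region (pid=178), and the client waits on the operation as in the
          // "Operation: FLUSH ... completed" line earlier in the log.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }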
2024-12-11T04:28:54,216 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:28:54,220 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412116aa9c30b581c4106b0b7b5e8f63db40a_697517215fc3b7180eb3ba48942407dc to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412116aa9c30b581c4106b0b7b5e8f63db40a_697517215fc3b7180eb3ba48942407dc
2024-12-11T04:28:54,221 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/bd4db745a5f5432a94d490075e2b22a9, store: [table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc]
2024-12-11T04:28:54,222 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/bd4db745a5f5432a94d490075e2b22a9 is 175, key is test_row_0/A:col10/1733891333779/Put/seqid=0
2024-12-11T04:28:54,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742428_1604 (size=82585)
2024-12-11T04:28:54,224 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=92, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/bd4db745a5f5432a94d490075e2b22a9
2024-12-11T04:28:54,231 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/aa10282e587748dc84e04cb33353086c is 50, key is test_row_0/B:col10/1733891333779/Put/seqid=0
2024-12-11T04:28:54,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742430_1606 (size=12001)
2024-12-11T04:28:54,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177
2024-12-11T04:28:54,284 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267
2024-12-11T04:28:54,285 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178
2024-12-11T04:28:54,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.
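The HMobStore/DefaultMobStoreFlusher entries above indicate that column family A is MOB-enabled: during the flush, oversized cell values are written under mobdir and renamed into the MOB data directory, while the regular store file (bd4db745a5f5432a94d490075e2b22a9) holds the remaining cells and references. A minimal sketch of declaring such a family with the standard descriptor builders (assumed API usage, threshold value illustrative, not taken from the test):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobFamilySketch {
      public static void main(String[] args) {
        // Declare family A as MOB-enabled; cells larger than the threshold are
        // flushed to MOB files (the mobdir/... path above) rather than kept
        // inline in the normal store file.
        TableDescriptorBuilder table = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                .setMobEnabled(true)
                .setMobThreshold(10L) // illustrative threshold in bytes
                .build());
        System.out.println(table.build());
      }
    }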
2024-12-11T04:28:54,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. as already flushing 2024-12-11T04:28:54,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:54,285 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:54,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
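Until the in-flight flush completes, the region keeps rejecting mutations (further "Over memstore limit" warnings follow at 04:28:54,427). The HBase client normally retries such failures internally; a caller doing its own handling might back off and retry along these lines, a sketch under assumed settings with the row, family, and qualifier names borrowed from the log:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutRetrySketch {
      // Retry a single put with exponential backoff while the region is too busy.
      static void putWithRetry(Connection conn) throws IOException, InterruptedException {
        try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          IOException last = null;
          for (int attempt = 0; attempt < 5; attempt++) {
            try {
              table.put(put);
              return;
            } catch (IOException e) {
              // RegionTooBusyException may surface directly or wrapped in a
              // retries-exhausted exception, depending on client retry settings.
              last = e;
              Thread.sleep(100L << attempt);
            }
          }
          throw last;
        }
      }
    }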
2024-12-11T04:28:54,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:54,427 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:54,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891394425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:54,427 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:54,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891394426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:54,429 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:54,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891394427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:54,430 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:54,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891394428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:54,430 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:54,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891394429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:54,437 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:54,437 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-12-11T04:28:54,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:54,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. as already flushing 2024-12-11T04:28:54,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:54,437 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:54,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:54,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:54,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-11T04:28:54,589 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:54,589 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-12-11T04:28:54,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:54,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. as already flushing 2024-12-11T04:28:54,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:54,590 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:54,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:54,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:54,636 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/aa10282e587748dc84e04cb33353086c 2024-12-11T04:28:54,643 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/804c2d9e140340b39295b207dc68b3cc is 50, key is test_row_0/C:col10/1733891333779/Put/seqid=0 2024-12-11T04:28:54,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742431_1607 (size=12001) 2024-12-11T04:28:54,646 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/804c2d9e140340b39295b207dc68b3cc 2024-12-11T04:28:54,650 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/bd4db745a5f5432a94d490075e2b22a9 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/bd4db745a5f5432a94d490075e2b22a9 2024-12-11T04:28:54,653 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/bd4db745a5f5432a94d490075e2b22a9, entries=450, sequenceid=92, filesize=80.6 K 2024-12-11T04:28:54,653 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/aa10282e587748dc84e04cb33353086c as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/aa10282e587748dc84e04cb33353086c 2024-12-11T04:28:54,658 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/aa10282e587748dc84e04cb33353086c, entries=150, sequenceid=92, filesize=11.7 K 2024-12-11T04:28:54,658 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/804c2d9e140340b39295b207dc68b3cc as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/804c2d9e140340b39295b207dc68b3cc 2024-12-11T04:28:54,662 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/804c2d9e140340b39295b207dc68b3cc, entries=150, sequenceid=92, filesize=11.7 K 2024-12-11T04:28:54,663 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 697517215fc3b7180eb3ba48942407dc in 884ms, sequenceid=92, compaction requested=true 2024-12-11T04:28:54,663 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 697517215fc3b7180eb3ba48942407dc: 2024-12-11T04:28:54,663 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:28:54,663 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 697517215fc3b7180eb3ba48942407dc:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:28:54,663 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:54,663 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 697517215fc3b7180eb3ba48942407dc:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:28:54,663 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:54,663 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 697517215fc3b7180eb3ba48942407dc:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:28:54,663 DEBUG 
[RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:28:54,663 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:28:54,664 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:28:54,664 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 144598 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:28:54,664 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): 697517215fc3b7180eb3ba48942407dc/B is initiating minor compaction (all files) 2024-12-11T04:28:54,664 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): 697517215fc3b7180eb3ba48942407dc/A is initiating minor compaction (all files) 2024-12-11T04:28:54,664 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 697517215fc3b7180eb3ba48942407dc/B in TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:54,664 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/0eeef0cd0b7b4a74ab2a178853c9779c, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/7ed18ab4932643db9313fca209206c96, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/aa10282e587748dc84e04cb33353086c] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp, totalSize=35.3 K 2024-12-11T04:28:54,664 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 697517215fc3b7180eb3ba48942407dc/A in TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 
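The two selection entries above show ExploringCompactionPolicy accepting all three eligible HFiles per store ("3 files ... with 1 in ratio") and starting minor compactions of A and B. The sketch below lists the configuration keys that govern that selection; the keys are standard HBase settings, but the values shown are illustrative defaults, not values read back from this run.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum number of eligible files before a minor compaction is scheduled;
    // compare the later "Need 3 to initiate" message when too few files are eligible.
    conf.setInt("hbase.hstore.compaction.min", 3);
    // Upper bound on the number of files compacted in a single pass.
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Size ratio used by ExploringCompactionPolicy when scoring candidate file sets.
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
  }
}
```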
2024-12-11T04:28:54,664 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/57a60ec820b64a44bc8bcc76da02adcd, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/5becf625da754edcb0ce57ad03d96984, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/bd4db745a5f5432a94d490075e2b22a9] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp, totalSize=141.2 K 2024-12-11T04:28:54,664 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:54,664 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. files: [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/57a60ec820b64a44bc8bcc76da02adcd, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/5becf625da754edcb0ce57ad03d96984, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/bd4db745a5f5432a94d490075e2b22a9] 2024-12-11T04:28:54,665 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 0eeef0cd0b7b4a74ab2a178853c9779c, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733891330471 2024-12-11T04:28:54,665 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 57a60ec820b64a44bc8bcc76da02adcd, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733891330471 2024-12-11T04:28:54,665 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 7ed18ab4932643db9313fca209206c96, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1733891331599 2024-12-11T04:28:54,665 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5becf625da754edcb0ce57ad03d96984, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1733891331599 2024-12-11T04:28:54,666 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting aa10282e587748dc84e04cb33353086c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733891333773 2024-12-11T04:28:54,666 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting bd4db745a5f5432a94d490075e2b22a9, keycount=450, bloomtype=ROW, size=80.6 K, 
encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733891333750 2024-12-11T04:28:54,672 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:28:54,685 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 697517215fc3b7180eb3ba48942407dc#B#compaction#520 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:54,686 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/f03de02559b946de9dd9406a1a98e064 is 50, key is test_row_0/B:col10/1733891333779/Put/seqid=0 2024-12-11T04:28:54,700 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121185f8bf41c8924f1b9bd006a930aed88f_697517215fc3b7180eb3ba48942407dc store=[table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:28:54,703 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121185f8bf41c8924f1b9bd006a930aed88f_697517215fc3b7180eb3ba48942407dc, store=[table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:28:54,703 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121185f8bf41c8924f1b9bd006a930aed88f_697517215fc3b7180eb3ba48942407dc because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:28:54,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742433_1609 (size=4469) 2024-12-11T04:28:54,709 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 697517215fc3b7180eb3ba48942407dc#A#compaction#519 average throughput is 0.66 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:54,709 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/d082ec823e514b61ad921c6a8c99db0f is 175, key is test_row_0/A:col10/1733891333779/Put/seqid=0 2024-12-11T04:28:54,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742434_1610 (size=31161) 2024-12-11T04:28:54,741 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:54,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742432_1608 (size=12207) 2024-12-11T04:28:54,742 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/d082ec823e514b61ad921c6a8c99db0f as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/d082ec823e514b61ad921c6a8c99db0f 2024-12-11T04:28:54,742 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-12-11T04:28:54,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 
2024-12-11T04:28:54,742 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2837): Flushing 697517215fc3b7180eb3ba48942407dc 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-11T04:28:54,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=A 2024-12-11T04:28:54,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:54,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=B 2024-12-11T04:28:54,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:54,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=C 2024-12-11T04:28:54,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:54,747 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 697517215fc3b7180eb3ba48942407dc/A of 697517215fc3b7180eb3ba48942407dc into d082ec823e514b61ad921c6a8c99db0f(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:28:54,747 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 697517215fc3b7180eb3ba48942407dc: 2024-12-11T04:28:54,747 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc., storeName=697517215fc3b7180eb3ba48942407dc/A, priority=13, startTime=1733891334663; duration=0sec 2024-12-11T04:28:54,747 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:28:54,747 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 697517215fc3b7180eb3ba48942407dc:A 2024-12-11T04:28:54,747 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-11T04:28:54,748 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-11T04:28:54,748 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 
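The repeated RegionTooBusyException warnings earlier in this section, and again just below, come from HRegion.checkResources rejecting writes while the region's memstore sits above its blocking limit (512 K in this test; in general that limit is hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier) until the in-flight flush drains it. The standard client already retries these internally, so the following is only an illustrative application-level backoff around a single Put; the table, row, and family names are taken from the log, everything else is assumed.

```java
import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break;
        } catch (IOException e) {
          // RegionTooBusyException is an IOException; the client may also surface it
          // wrapped after exhausting its own retries. Back off and try again.
          if (attempt >= 5) {
            throw e;
          }
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}
```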
2024-12-11T04:28:54,748 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. because compaction request was cancelled 2024-12-11T04:28:54,748 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 697517215fc3b7180eb3ba48942407dc:C 2024-12-11T04:28:54,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211bec9febc67af4e7991ca04baad2f5feb_697517215fc3b7180eb3ba48942407dc is 50, key is test_row_0/A:col10/1733891333816/Put/seqid=0 2024-12-11T04:28:54,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742435_1611 (size=12154) 2024-12-11T04:28:54,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:54,767 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211bec9febc67af4e7991ca04baad2f5feb_697517215fc3b7180eb3ba48942407dc to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211bec9febc67af4e7991ca04baad2f5feb_697517215fc3b7180eb3ba48942407dc 2024-12-11T04:28:54,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/6197f54758f249da9d471bad5c2ab171, store: [table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:28:54,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/6197f54758f249da9d471bad5c2ab171 is 175, key is test_row_0/A:col10/1733891333816/Put/seqid=0 2024-12-11T04:28:54,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742436_1612 (size=30955) 2024-12-11T04:28:54,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 697517215fc3b7180eb3ba48942407dc 2024-12-11T04:28:54,932 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. as already flushing 2024-12-11T04:28:54,939 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:54,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891394936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:54,939 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:54,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891394936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:54,939 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:54,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891394937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:54,940 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:54,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891394938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:54,940 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:54,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891394938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:55,041 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:55,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891395040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:55,041 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:55,041 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:55,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891395040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:55,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891395040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:55,041 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:55,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891395040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:55,042 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:28:55,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891395041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:28:55,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177
2024-12-11T04:28:55,146 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/f03de02559b946de9dd9406a1a98e064 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/f03de02559b946de9dd9406a1a98e064
2024-12-11T04:28:55,150 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 697517215fc3b7180eb3ba48942407dc/B of 697517215fc3b7180eb3ba48942407dc into f03de02559b946de9dd9406a1a98e064(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-11T04:28:55,150 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 697517215fc3b7180eb3ba48942407dc:
2024-12-11T04:28:55,150 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc., storeName=697517215fc3b7180eb3ba48942407dc/B, priority=13, startTime=1733891334663; duration=0sec
2024-12-11T04:28:55,150 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-11T04:28:55,150 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 697517215fc3b7180eb3ba48942407dc:B
2024-12-11T04:28:55,172 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=116, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/6197f54758f249da9d471bad5c2ab171
2024-12-11T04:28:55,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/a621efc3be8d494586b1c8637fdc41c6 is 50, key is test_row_0/B:col10/1733891333816/Put/seqid=0
2024-12-11T04:28:55,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742437_1613 (size=12001)
2024-12-11T04:28:55,244 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:55,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891395242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:55,244 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:55,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891395242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:55,244 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:55,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891395242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:55,244 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:55,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891395243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:55,245 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:55,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891395243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:55,546 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:55,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891395545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:55,546 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:55,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891395545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:55,548 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:55,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891395546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:55,549 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:55,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891395547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:55,549 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:55,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891395547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:55,590 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/a621efc3be8d494586b1c8637fdc41c6 2024-12-11T04:28:55,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/0f7a085fad8b422db63cbcdcee8cdaf9 is 50, key is test_row_0/C:col10/1733891333816/Put/seqid=0 2024-12-11T04:28:55,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742438_1614 (size=12001) 2024-12-11T04:28:56,001 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/0f7a085fad8b422db63cbcdcee8cdaf9 2024-12-11T04:28:56,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/6197f54758f249da9d471bad5c2ab171 as 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/6197f54758f249da9d471bad5c2ab171 2024-12-11T04:28:56,008 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/6197f54758f249da9d471bad5c2ab171, entries=150, sequenceid=116, filesize=30.2 K 2024-12-11T04:28:56,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/a621efc3be8d494586b1c8637fdc41c6 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/a621efc3be8d494586b1c8637fdc41c6 2024-12-11T04:28:56,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,014 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/a621efc3be8d494586b1c8637fdc41c6, entries=150, sequenceid=116, filesize=11.7 K 2024-12-11T04:28:56,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/0f7a085fad8b422db63cbcdcee8cdaf9 as 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/0f7a085fad8b422db63cbcdcee8cdaf9 2024-12-11T04:28:56,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,019 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,019 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/0f7a085fad8b422db63cbcdcee8cdaf9, entries=150, sequenceid=116, filesize=11.7 K 2024-12-11T04:28:56,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,020 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 697517215fc3b7180eb3ba48942407dc in 1278ms, sequenceid=116, compaction requested=true 2024-12-11T04:28:56,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2538): Flush status journal for 697517215fc3b7180eb3ba48942407dc: 2024-12-11T04:28:56,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 
2024-12-11T04:28:56,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=178 2024-12-11T04:28:56,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=178 2024-12-11T04:28:56,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,023 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=178, resume processing ppid=177 2024-12-11T04:28:56,023 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=178, ppid=177, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0400 sec 2024-12-11T04:28:56,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,024 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,025 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=177, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees in 2.0460 sec 2024-12-11T04:28:56,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,027 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,030 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,033 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,037 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,042 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,045 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,048 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,052 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 697517215fc3b7180eb3ba48942407dc 2024-12-11T04:28:56,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,054 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 697517215fc3b7180eb3ba48942407dc 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-11T04:28:56,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,055 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=A 2024-12-11T04:28:56,055 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,055 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:56,055 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=B 2024-12-11T04:28:56,055 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:56,055 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=C 2024-12-11T04:28:56,055 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:56,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,058 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,061 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211df6eaaed49fb4753a32b32b78d4236fd_697517215fc3b7180eb3ba48942407dc is 50, key is test_row_0/A:col10/1733891336054/Put/seqid=0 2024-12-11T04:28:56,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742439_1615 (size=14694) 
2024-12-11T04:28:56,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,080 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:56,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891396076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:56,080 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:56,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891396076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:56,080 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:56,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891396076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:56,083 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:56,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891396080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:56,083 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:56,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891396080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:56,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-11T04:28:56,083 INFO [Thread-2578 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 177 completed 2024-12-11T04:28:56,084 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:28:56,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=179, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees 2024-12-11T04:28:56,086 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=179, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:28:56,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-11T04:28:56,087 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=179, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:28:56,087 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=180, ppid=179, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:28:56,183 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:56,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891396181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:56,183 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:56,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891396181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:56,183 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:56,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891396181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:56,186 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:56,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891396184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:56,187 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:56,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891396184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:56,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-11T04:28:56,239 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:56,239 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-11T04:28:56,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:56,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. as already flushing 2024-12-11T04:28:56,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:56,239 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:28:56,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:56,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:56,385 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:56,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891396384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:56,386 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:56,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891396384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:56,386 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:56,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891396385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:56,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-11T04:28:56,389 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:56,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891396387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:56,389 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:56,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891396388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:56,392 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:56,392 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-11T04:28:56,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:56,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. as already flushing 2024-12-11T04:28:56,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:56,393 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
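The "Over memstore limit=512.0 K" figure in the RegionTooBusyException entries above is the region's blocking memstore size: in a stock HBase configuration it is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier (defaults 128 MB and 4), and this test clearly runs with a far smaller limit to force back-pressure. The sketch below only illustrates how the two settings combine; the class name is hypothetical and the 128 KB value is an assumption chosen to reproduce the 512 K limit seen here, not a setting shown in this excerpt.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Hypothetical, test-style values: 128 KB * 4 = 512 KB matches the limit in the log,
        // but the actual settings used by this test run are not visible in this excerpt.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = flushSize * multiplier;
        // Above this size HRegion.checkResources rejects new mutations with
        // RegionTooBusyException until a flush shrinks the memstore again.
        System.out.println("memstore blocking limit = " + blockingLimit + " bytes");
    }
}

With the defaults the same arithmetic gives 512 MB per region, which is why this level of write rejection normally only appears under sustained write pressure or deliberately tiny test limits.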
2024-12-11T04:28:56,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:56,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:56,466 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:56,469 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211df6eaaed49fb4753a32b32b78d4236fd_697517215fc3b7180eb3ba48942407dc to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211df6eaaed49fb4753a32b32b78d4236fd_697517215fc3b7180eb3ba48942407dc 2024-12-11T04:28:56,470 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/af34135ee36741d4bd7678623edc7dd7, store: [table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:28:56,470 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/af34135ee36741d4bd7678623edc7dd7 is 175, key is test_row_0/A:col10/1733891336054/Put/seqid=0 2024-12-11T04:28:56,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742440_1616 (size=39649) 2024-12-11T04:28:56,544 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:56,545 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-11T04:28:56,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:56,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. as already flushing 2024-12-11T04:28:56,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:56,545 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:56,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:56,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:56,688 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:56,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891396687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:56,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-11T04:28:56,689 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:56,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891396688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:56,690 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:56,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891396689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:56,693 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:56,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891396692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:56,693 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:56,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891396692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:56,697 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:56,697 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-11T04:28:56,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:56,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. as already flushing 2024-12-11T04:28:56,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:56,698 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:56,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:56,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:56,850 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:56,850 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-11T04:28:56,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:56,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. as already flushing 2024-12-11T04:28:56,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:56,851 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:56,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:56,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:56,881 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=132, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/af34135ee36741d4bd7678623edc7dd7 2024-12-11T04:28:56,887 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/534c140c5d464abb9c65b882860c42e3 is 50, key is test_row_0/B:col10/1733891336054/Put/seqid=0 2024-12-11T04:28:56,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742441_1617 (size=12101) 2024-12-11T04:28:56,891 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/534c140c5d464abb9c65b882860c42e3 2024-12-11T04:28:56,898 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/9ee1a632363846f9993a26b3c62cb310 is 50, key is test_row_0/C:col10/1733891336054/Put/seqid=0 2024-12-11T04:28:56,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742442_1618 (size=12101) 2024-12-11T04:28:57,003 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 
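The MemStoreFlusher entries just above show the flush materializing one temporary HFile per column family (A, B, C) under the region's .tmp directory before commit, while the master keeps re-dispatching the FlushRegionCallable for pid=180. For reference, the same kind of table flush can be requested from application code through the public Admin API; this is a minimal sketch with a hypothetical class name and no error handling, and whether it is driven as a master-side procedure (as the pid=179/180 chain here appears to be) depends on the HBase version in use.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Ask the cluster to flush every region of the table; in this log the
            // corresponding server-side work is the FlushRegionCallable executions above.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}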
2024-12-11T04:28:57,003 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-11T04:28:57,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:57,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. as already flushing 2024-12-11T04:28:57,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:57,003 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:57,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
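While the flush procedure keeps being retried, client Mutate calls on either side of this point keep failing with RegionTooBusyException. Below is a minimal client-side sketch of backing off and retrying on that exception, assuming the standard HBase client API; the class name, row/column values (taken from the test rows in the log), retry counts, and backoff values are illustrative, and depending on client retry settings the exception may reach application code wrapped (e.g. as the cause of a retries-exhausted error) rather than directly.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Illustrative: keep the client's own internal retries short so back-pressure
        // surfaces to this loop instead of being absorbed silently.
        conf.setInt("hbase.client.retries.number", 1);
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100; // illustrative starting backoff
            for (int attempt = 0; attempt < 10; attempt++) {
                try {
                    table.put(put); // rejected while the memstore is over its blocking limit
                    return;         // write accepted
                } catch (IOException e) {
                    boolean tooBusy = e instanceof RegionTooBusyException
                        || e.getCause() instanceof RegionTooBusyException;
                    if (!tooBusy) {
                        throw e; // not the back-pressure case; rethrow
                    }
                    Thread.sleep(backoffMs);                    // let the flush make progress
                    backoffMs = Math.min(backoffMs * 2, 5_000); // exponential backoff, capped
                }
            }
            throw new IOException("region stayed too busy after repeated retries");
        }
    }
}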
2024-12-11T04:28:57,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:57,155 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:57,156 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-11T04:28:57,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:57,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 
as already flushing 2024-12-11T04:28:57,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:57,156 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:57,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:57,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:57,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-11T04:28:57,191 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:57,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891397190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:57,193 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:57,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891397193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:57,195 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:57,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891397194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:57,195 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:57,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891397194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:57,196 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:57,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891397195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:57,308 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:57,309 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-11T04:28:57,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:57,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. as already flushing 2024-12-11T04:28:57,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:57,309 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:57,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:57,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:28:57,310 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/9ee1a632363846f9993a26b3c62cb310 2024-12-11T04:28:57,314 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/af34135ee36741d4bd7678623edc7dd7 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/af34135ee36741d4bd7678623edc7dd7 2024-12-11T04:28:57,317 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/af34135ee36741d4bd7678623edc7dd7, entries=200, sequenceid=132, filesize=38.7 K 2024-12-11T04:28:57,318 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/534c140c5d464abb9c65b882860c42e3 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/534c140c5d464abb9c65b882860c42e3 2024-12-11T04:28:57,321 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/534c140c5d464abb9c65b882860c42e3, entries=150, 
sequenceid=132, filesize=11.8 K 2024-12-11T04:28:57,322 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/9ee1a632363846f9993a26b3c62cb310 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/9ee1a632363846f9993a26b3c62cb310 2024-12-11T04:28:57,325 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/9ee1a632363846f9993a26b3c62cb310, entries=150, sequenceid=132, filesize=11.8 K 2024-12-11T04:28:57,325 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 697517215fc3b7180eb3ba48942407dc in 1271ms, sequenceid=132, compaction requested=true 2024-12-11T04:28:57,325 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 697517215fc3b7180eb3ba48942407dc: 2024-12-11T04:28:57,326 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 697517215fc3b7180eb3ba48942407dc:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:28:57,326 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:57,326 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 697517215fc3b7180eb3ba48942407dc:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:28:57,326 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:28:57,326 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:28:57,326 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 697517215fc3b7180eb3ba48942407dc:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:28:57,326 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-11T04:28:57,326 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101765 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:28:57,327 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): 697517215fc3b7180eb3ba48942407dc/A is initiating minor compaction (all files) 2024-12-11T04:28:57,327 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 697517215fc3b7180eb3ba48942407dc/A in TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 
2024-12-11T04:28:57,327 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/d082ec823e514b61ad921c6a8c99db0f, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/6197f54758f249da9d471bad5c2ab171, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/af34135ee36741d4bd7678623edc7dd7] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp, totalSize=99.4 K 2024-12-11T04:28:57,327 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:57,327 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. files: [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/d082ec823e514b61ad921c6a8c99db0f, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/6197f54758f249da9d471bad5c2ab171, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/af34135ee36741d4bd7678623edc7dd7] 2024-12-11T04:28:57,327 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting d082ec823e514b61ad921c6a8c99db0f, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733891333773 2024-12-11T04:28:57,327 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 6197f54758f249da9d471bad5c2ab171, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1733891333814 2024-12-11T04:28:57,328 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting af34135ee36741d4bd7678623edc7dd7, keycount=200, bloomtype=ROW, size=38.7 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1733891334935 2024-12-11T04:28:57,329 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T04:28:57,330 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48242 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T04:28:57,330 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): 697517215fc3b7180eb3ba48942407dc/C is initiating minor compaction (all files) 2024-12-11T04:28:57,330 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] 
regionserver.HRegion(2351): Starting compaction of 697517215fc3b7180eb3ba48942407dc/C in TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:57,330 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/4a7b63c883c34ebfa9db65af603384b2, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/804c2d9e140340b39295b207dc68b3cc, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/0f7a085fad8b422db63cbcdcee8cdaf9, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/9ee1a632363846f9993a26b3c62cb310] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp, totalSize=47.1 K 2024-12-11T04:28:57,330 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4a7b63c883c34ebfa9db65af603384b2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1733891331599 2024-12-11T04:28:57,331 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 804c2d9e140340b39295b207dc68b3cc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733891333773 2024-12-11T04:28:57,331 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0f7a085fad8b422db63cbcdcee8cdaf9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1733891333814 2024-12-11T04:28:57,331 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9ee1a632363846f9993a26b3c62cb310, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1733891334935 2024-12-11T04:28:57,333 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:28:57,335 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412115ee4fc12173a4ff996bbde4e0b4b4797_697517215fc3b7180eb3ba48942407dc store=[table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:28:57,337 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412115ee4fc12173a4ff996bbde4e0b4b4797_697517215fc3b7180eb3ba48942407dc, store=[table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:28:57,337 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412115ee4fc12173a4ff996bbde4e0b4b4797_697517215fc3b7180eb3ba48942407dc because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:28:57,358 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 697517215fc3b7180eb3ba48942407dc#C#compaction#528 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:57,358 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/601e476f47b64fe6aaa683b2cd55399a is 50, key is test_row_0/C:col10/1733891336054/Put/seqid=0 2024-12-11T04:28:57,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742443_1619 (size=4469) 2024-12-11T04:28:57,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742444_1620 (size=12375) 2024-12-11T04:28:57,461 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:28:57,461 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-11T04:28:57,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 
2024-12-11T04:28:57,462 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2837): Flushing 697517215fc3b7180eb3ba48942407dc 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-11T04:28:57,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=A 2024-12-11T04:28:57,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:57,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=B 2024-12-11T04:28:57,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:57,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=C 2024-12-11T04:28:57,462 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:57,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412111183ae0ff22a4b879634814ca218c1ed_697517215fc3b7180eb3ba48942407dc is 50, key is test_row_0/A:col10/1733891336078/Put/seqid=0 2024-12-11T04:28:57,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742445_1621 (size=12304) 2024-12-11T04:28:57,769 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 697517215fc3b7180eb3ba48942407dc#A#compaction#527 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:57,770 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/63501d978bbd41e495fcc2298481ad58 is 175, key is test_row_0/A:col10/1733891336054/Put/seqid=0 2024-12-11T04:28:57,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742446_1622 (size=31363) 2024-12-11T04:28:57,781 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/601e476f47b64fe6aaa683b2cd55399a as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/601e476f47b64fe6aaa683b2cd55399a 2024-12-11T04:28:57,786 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 697517215fc3b7180eb3ba48942407dc/C of 697517215fc3b7180eb3ba48942407dc into 601e476f47b64fe6aaa683b2cd55399a(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:28:57,786 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 697517215fc3b7180eb3ba48942407dc: 2024-12-11T04:28:57,786 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc., storeName=697517215fc3b7180eb3ba48942407dc/C, priority=12, startTime=1733891337326; duration=0sec 2024-12-11T04:28:57,786 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:28:57,786 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 697517215fc3b7180eb3ba48942407dc:C 2024-12-11T04:28:57,786 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:28:57,787 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:28:57,787 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): 697517215fc3b7180eb3ba48942407dc/B is initiating minor compaction (all files) 2024-12-11T04:28:57,787 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 697517215fc3b7180eb3ba48942407dc/B in TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 
2024-12-11T04:28:57,787 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/f03de02559b946de9dd9406a1a98e064, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/a621efc3be8d494586b1c8637fdc41c6, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/534c140c5d464abb9c65b882860c42e3] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp, totalSize=35.5 K 2024-12-11T04:28:57,787 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting f03de02559b946de9dd9406a1a98e064, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1733891333773 2024-12-11T04:28:57,788 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting a621efc3be8d494586b1c8637fdc41c6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1733891333814 2024-12-11T04:28:57,788 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 534c140c5d464abb9c65b882860c42e3, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1733891334935 2024-12-11T04:28:57,795 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 697517215fc3b7180eb3ba48942407dc#B#compaction#530 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:57,795 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/70e97473ba4f48008a4dd70fc705dda1 is 50, key is test_row_0/B:col10/1733891336054/Put/seqid=0 2024-12-11T04:28:57,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742447_1623 (size=12409) 2024-12-11T04:28:57,809 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/70e97473ba4f48008a4dd70fc705dda1 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/70e97473ba4f48008a4dd70fc705dda1 2024-12-11T04:28:57,814 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 697517215fc3b7180eb3ba48942407dc/B of 697517215fc3b7180eb3ba48942407dc into 70e97473ba4f48008a4dd70fc705dda1(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T04:28:57,814 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 697517215fc3b7180eb3ba48942407dc: 2024-12-11T04:28:57,814 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc., storeName=697517215fc3b7180eb3ba48942407dc/B, priority=13, startTime=1733891337326; duration=0sec 2024-12-11T04:28:57,814 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:57,814 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 697517215fc3b7180eb3ba48942407dc:B 2024-12-11T04:28:57,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:57,888 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412111183ae0ff22a4b879634814ca218c1ed_697517215fc3b7180eb3ba48942407dc to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412111183ae0ff22a4b879634814ca218c1ed_697517215fc3b7180eb3ba48942407dc 2024-12-11T04:28:57,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/c1281bcfd41e46d393d58f88297d4f73, store: [table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:28:57,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/c1281bcfd41e46d393d58f88297d4f73 is 175, key is test_row_0/A:col10/1733891336078/Put/seqid=0 2024-12-11T04:28:57,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742448_1624 (size=31105) 2024-12-11T04:28:57,893 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=154, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/c1281bcfd41e46d393d58f88297d4f73 2024-12-11T04:28:57,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/f3710bf23d0a43ab817d105bf875e822 is 50, key is test_row_0/B:col10/1733891336078/Put/seqid=0 2024-12-11T04:28:57,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742449_1625 (size=12151) 2024-12-11T04:28:57,911 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/f3710bf23d0a43ab817d105bf875e822 2024-12-11T04:28:57,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/1c81b1f6f0814adda719bcaa74163b05 is 50, key is test_row_0/C:col10/1733891336078/Put/seqid=0 2024-12-11T04:28:57,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742450_1626 (size=12151) 2024-12-11T04:28:58,180 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/63501d978bbd41e495fcc2298481ad58 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/63501d978bbd41e495fcc2298481ad58 2024-12-11T04:28:58,184 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 697517215fc3b7180eb3ba48942407dc/A of 697517215fc3b7180eb3ba48942407dc into 63501d978bbd41e495fcc2298481ad58(size=30.6 K), total size for store is 30.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:28:58,184 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 697517215fc3b7180eb3ba48942407dc: 2024-12-11T04:28:58,184 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc., storeName=697517215fc3b7180eb3ba48942407dc/A, priority=13, startTime=1733891337325; duration=0sec 2024-12-11T04:28:58,184 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:58,184 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 697517215fc3b7180eb3ba48942407dc:A 2024-12-11T04:28:58,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-11T04:28:58,195 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 
as already flushing 2024-12-11T04:28:58,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 697517215fc3b7180eb3ba48942407dc 2024-12-11T04:28:58,207 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:58,207 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:58,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891398204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:58,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891398205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:58,209 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:58,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891398206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:58,209 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:58,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891398207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:58,209 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:58,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891398207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:58,310 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:58,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891398308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:58,310 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:58,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891398309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:58,311 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:58,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891398310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:58,312 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:58,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891398310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:58,312 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:58,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891398310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:58,324 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/1c81b1f6f0814adda719bcaa74163b05 2024-12-11T04:28:58,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/c1281bcfd41e46d393d58f88297d4f73 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/c1281bcfd41e46d393d58f88297d4f73 2024-12-11T04:28:58,331 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/c1281bcfd41e46d393d58f88297d4f73, entries=150, sequenceid=154, filesize=30.4 K 2024-12-11T04:28:58,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/f3710bf23d0a43ab817d105bf875e822 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/f3710bf23d0a43ab817d105bf875e822 2024-12-11T04:28:58,334 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/f3710bf23d0a43ab817d105bf875e822, entries=150, sequenceid=154, filesize=11.9 K 2024-12-11T04:28:58,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/1c81b1f6f0814adda719bcaa74163b05 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/1c81b1f6f0814adda719bcaa74163b05 2024-12-11T04:28:58,338 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/1c81b1f6f0814adda719bcaa74163b05, entries=150, sequenceid=154, filesize=11.9 K 2024-12-11T04:28:58,339 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for 697517215fc3b7180eb3ba48942407dc in 877ms, sequenceid=154, compaction requested=false 2024-12-11T04:28:58,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2538): Flush status journal for 697517215fc3b7180eb3ba48942407dc: 2024-12-11T04:28:58,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:58,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=180 2024-12-11T04:28:58,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=180 2024-12-11T04:28:58,341 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=180, resume processing ppid=179 2024-12-11T04:28:58,341 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=180, ppid=179, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2530 sec 2024-12-11T04:28:58,345 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=179, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees in 2.2570 sec 2024-12-11T04:28:58,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 697517215fc3b7180eb3ba48942407dc 2024-12-11T04:28:58,515 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 697517215fc3b7180eb3ba48942407dc 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-11T04:28:58,515 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=A 2024-12-11T04:28:58,515 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:58,515 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=B 2024-12-11T04:28:58,515 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:58,515 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
697517215fc3b7180eb3ba48942407dc, store=C 2024-12-11T04:28:58,515 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:58,521 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121122c5716728704f9db47f5e7f764e7932_697517215fc3b7180eb3ba48942407dc is 50, key is test_row_0/A:col10/1733891338514/Put/seqid=0 2024-12-11T04:28:58,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742451_1627 (size=12304) 2024-12-11T04:28:58,530 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:58,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891398528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:58,532 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:58,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891398529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:58,532 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:58,532 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:58,532 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:58,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891398529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:58,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891398529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:58,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891398529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:58,633 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:58,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891398631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:58,634 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:58,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891398633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:58,634 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:58,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891398633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:58,634 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:58,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891398633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:58,634 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:58,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891398633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:58,836 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:58,836 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:58,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891398834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:58,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891398835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:58,837 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:58,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891398835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:58,837 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:58,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891398836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:58,837 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:58,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891398836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:58,925 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:58,928 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121122c5716728704f9db47f5e7f764e7932_697517215fc3b7180eb3ba48942407dc to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121122c5716728704f9db47f5e7f764e7932_697517215fc3b7180eb3ba48942407dc 2024-12-11T04:28:58,929 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/620b317add274f728bd05aaec3dd03cd, store: [table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:28:58,930 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/620b317add274f728bd05aaec3dd03cd is 175, key is test_row_0/A:col10/1733891338514/Put/seqid=0 2024-12-11T04:28:58,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742452_1628 (size=31105) 2024-12-11T04:28:58,933 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=175, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/620b317add274f728bd05aaec3dd03cd 2024-12-11T04:28:58,939 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/49e05ad891c549c48c9f0199ec33a855 is 50, key is test_row_0/B:col10/1733891338514/Put/seqid=0 2024-12-11T04:28:58,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742453_1629 
(size=12151) 2024-12-11T04:28:58,943 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=175 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/49e05ad891c549c48c9f0199ec33a855 2024-12-11T04:28:58,952 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/dd112c631d4f4026add4acefaf85647b is 50, key is test_row_0/C:col10/1733891338514/Put/seqid=0 2024-12-11T04:28:58,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742454_1630 (size=12151) 2024-12-11T04:28:59,139 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:59,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891399138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:59,140 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:59,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891399138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:59,140 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:59,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891399139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:59,140 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:59,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891399139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:59,140 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:59,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891399139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:59,356 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=175 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/dd112c631d4f4026add4acefaf85647b 2024-12-11T04:28:59,360 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/620b317add274f728bd05aaec3dd03cd as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/620b317add274f728bd05aaec3dd03cd 2024-12-11T04:28:59,364 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/620b317add274f728bd05aaec3dd03cd, entries=150, sequenceid=175, filesize=30.4 K 2024-12-11T04:28:59,365 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/49e05ad891c549c48c9f0199ec33a855 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/49e05ad891c549c48c9f0199ec33a855 2024-12-11T04:28:59,368 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/49e05ad891c549c48c9f0199ec33a855, entries=150, sequenceid=175, filesize=11.9 K 2024-12-11T04:28:59,369 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/dd112c631d4f4026add4acefaf85647b as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/dd112c631d4f4026add4acefaf85647b 2024-12-11T04:28:59,371 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/dd112c631d4f4026add4acefaf85647b, entries=150, sequenceid=175, filesize=11.9 K 2024-12-11T04:28:59,372 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=107.34 KB/109920 for 697517215fc3b7180eb3ba48942407dc in 858ms, sequenceid=175, compaction requested=true 2024-12-11T04:28:59,372 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 697517215fc3b7180eb3ba48942407dc: 2024-12-11T04:28:59,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 697517215fc3b7180eb3ba48942407dc:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:28:59,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:59,372 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:28:59,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 697517215fc3b7180eb3ba48942407dc:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:28:59,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:59,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 697517215fc3b7180eb3ba48942407dc:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:28:59,372 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:28:59,372 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:28:59,374 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:28:59,374 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93573 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:28:59,374 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): 697517215fc3b7180eb3ba48942407dc/B is initiating minor compaction (all files) 2024-12-11T04:28:59,374 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): 697517215fc3b7180eb3ba48942407dc/A is initiating minor compaction (all files) 2024-12-11T04:28:59,374 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 697517215fc3b7180eb3ba48942407dc/A in TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 
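A few region-server settings account for the numbers that keep recurring in the entries above: the 512.0 K figure in the RegionTooBusyException warnings is the memstore blocking limit (memstore flush size times the block multiplier), and the compaction selection's "3 eligible, 16 blocking" maps to the minimum file count for a minor compaction and the blocking store-file count. The snippet below is only a sketch of where these knobs live; the values are assumptions chosen to mirror what this log suggests, not the configuration actually used by TestAcidGuarantees.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RegionTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Writes block once a region's memstore exceeds flush.size * block.multiplier;
        // 128 KB * 4 = 512 KB would match "Over memstore limit=512.0 K" (assumed values).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        // A store becomes a minor-compaction candidate once this many files are eligible
        // ("Selecting compaction from 3 store files ... 3 eligible").
        conf.setInt("hbase.hstore.compaction.min", 3);
        // Updates are delayed once a store holds this many files ("16 blocking").
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        System.out.println(conf.get("hbase.hregion.memstore.flush.size"));
      }
    }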
2024-12-11T04:28:59,374 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/63501d978bbd41e495fcc2298481ad58, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/c1281bcfd41e46d393d58f88297d4f73, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/620b317add274f728bd05aaec3dd03cd] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp, totalSize=91.4 K 2024-12-11T04:28:59,374 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:59,374 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. files: [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/63501d978bbd41e495fcc2298481ad58, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/c1281bcfd41e46d393d58f88297d4f73, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/620b317add274f728bd05aaec3dd03cd] 2024-12-11T04:28:59,374 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 697517215fc3b7180eb3ba48942407dc/B in TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 
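Column family A of this table is MOB-enabled, which is why its flushes and compactions run through mob.DefaultMobStoreFlusher and mob.DefaultMobStoreCompactor and why files land under the mobdir path. The fragment below is only a sketch of how such a family can be declared with the HBase 2.x client API; the threshold value is an assumption for illustration, not the one this test uses.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobFamilySketch {
      public static void main(String[] args) {
        // Family A stored as a MOB column family; cells at or above the
        // threshold are written to separate MOB files under mobdir.
        ColumnFamilyDescriptor familyA = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("A"))
            .setMobEnabled(true)
            .setMobThreshold(1024L)   // assumed threshold, for illustration only
            .build();
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setColumnFamily(familyA)
            .build();
        System.out.println(desc);
      }
    }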
2024-12-11T04:28:59,374 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/70e97473ba4f48008a4dd70fc705dda1, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/f3710bf23d0a43ab817d105bf875e822, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/49e05ad891c549c48c9f0199ec33a855] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp, totalSize=35.9 K 2024-12-11T04:28:59,374 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 63501d978bbd41e495fcc2298481ad58, keycount=150, bloomtype=ROW, size=30.6 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1733891334935 2024-12-11T04:28:59,375 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 70e97473ba4f48008a4dd70fc705dda1, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1733891334935 2024-12-11T04:28:59,375 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting c1281bcfd41e46d393d58f88297d4f73, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1733891336071 2024-12-11T04:28:59,375 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting f3710bf23d0a43ab817d105bf875e822, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1733891336071 2024-12-11T04:28:59,375 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 620b317add274f728bd05aaec3dd03cd, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733891338206 2024-12-11T04:28:59,375 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 49e05ad891c549c48c9f0199ec33a855, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733891338206 2024-12-11T04:28:59,380 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:28:59,382 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 697517215fc3b7180eb3ba48942407dc#B#compaction#536 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:59,382 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/03a9456292c7433894ebd0989860dd67 is 50, key is test_row_0/B:col10/1733891338514/Put/seqid=0 2024-12-11T04:28:59,382 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241211d6d63b9444ee4994b4637589141e9c4d_697517215fc3b7180eb3ba48942407dc store=[table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:28:59,384 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241211d6d63b9444ee4994b4637589141e9c4d_697517215fc3b7180eb3ba48942407dc, store=[table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:28:59,384 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211d6d63b9444ee4994b4637589141e9c4d_697517215fc3b7180eb3ba48942407dc because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:28:59,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742455_1631 (size=12561) 2024-12-11T04:28:59,392 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/03a9456292c7433894ebd0989860dd67 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/03a9456292c7433894ebd0989860dd67 2024-12-11T04:28:59,397 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 697517215fc3b7180eb3ba48942407dc/B of 697517215fc3b7180eb3ba48942407dc into 03a9456292c7433894ebd0989860dd67(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
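The flushes in this log are driven both by memstore pressure (the MemStoreFlusher.0 entries) and by explicit flush requests from the test client, which show up further down as "Operation: FLUSH, Table Name: default:TestAcidGuarantees" followed by a FlushTableProcedure on the master. A minimal sketch of issuing such a flush through the Admin API, assuming a reachable cluster configuration on the classpath:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Asks the master to flush every region of the table; the region
          // server then logs the same kind of flush activity seen here.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }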
2024-12-11T04:28:59,397 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 697517215fc3b7180eb3ba48942407dc: 2024-12-11T04:28:59,397 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc., storeName=697517215fc3b7180eb3ba48942407dc/B, priority=13, startTime=1733891339372; duration=0sec 2024-12-11T04:28:59,397 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:28:59,397 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 697517215fc3b7180eb3ba48942407dc:B 2024-12-11T04:28:59,397 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:28:59,398 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36677 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:28:59,398 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): 697517215fc3b7180eb3ba48942407dc/C is initiating minor compaction (all files) 2024-12-11T04:28:59,398 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 697517215fc3b7180eb3ba48942407dc/C in TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:28:59,398 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/601e476f47b64fe6aaa683b2cd55399a, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/1c81b1f6f0814adda719bcaa74163b05, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/dd112c631d4f4026add4acefaf85647b] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp, totalSize=35.8 K 2024-12-11T04:28:59,398 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 601e476f47b64fe6aaa683b2cd55399a, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1733891334935 2024-12-11T04:28:59,399 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 1c81b1f6f0814adda719bcaa74163b05, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1733891336071 2024-12-11T04:28:59,399 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting dd112c631d4f4026add4acefaf85647b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733891338206 2024-12-11T04:28:59,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 
is added to blk_1073742456_1632 (size=4469) 2024-12-11T04:28:59,401 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 697517215fc3b7180eb3ba48942407dc#A#compaction#537 average throughput is 1.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:59,403 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/d659c59b869049269b7665607289f0fe is 175, key is test_row_0/A:col10/1733891338514/Put/seqid=0 2024-12-11T04:28:59,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742457_1633 (size=31515) 2024-12-11T04:28:59,407 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 697517215fc3b7180eb3ba48942407dc#C#compaction#538 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:28:59,407 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/163e5fb6b49f4604baf984068940d5f0 is 50, key is test_row_0/C:col10/1733891338514/Put/seqid=0 2024-12-11T04:28:59,412 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/d659c59b869049269b7665607289f0fe as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/d659c59b869049269b7665607289f0fe 2024-12-11T04:28:59,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742458_1634 (size=12527) 2024-12-11T04:28:59,417 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 697517215fc3b7180eb3ba48942407dc/A of 697517215fc3b7180eb3ba48942407dc into d659c59b869049269b7665607289f0fe(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
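The RegionTooBusyException warnings that resume just below come from HRegion.checkResources(), which rejects writes while the region's memstore is over its blocking limit so the flusher can catch up. The sketch below is illustrative only: a standalone put against the table and row named in this log, with a manual backoff when that exception reaches the caller. In practice the HBase client retries such failures internally before surfacing them, and the retry budget and sleep times here are arbitrary assumptions.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutWithBackoff {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          // Column family A / qualifier col10 mirror the cells seen in this log.
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          for (int attempt = 1; attempt <= 5; attempt++) {   // assumed retry budget
            try {
              table.put(put);
              break;                                          // write accepted
            } catch (RegionTooBusyException e) {
              // Region is over its memstore blocking limit; back off and retry.
              Thread.sleep(100L * attempt);
            }
          }
        }
      }
    }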
2024-12-11T04:28:59,417 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 697517215fc3b7180eb3ba48942407dc: 2024-12-11T04:28:59,417 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc., storeName=697517215fc3b7180eb3ba48942407dc/A, priority=13, startTime=1733891339372; duration=0sec 2024-12-11T04:28:59,417 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:59,417 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 697517215fc3b7180eb3ba48942407dc:A 2024-12-11T04:28:59,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 697517215fc3b7180eb3ba48942407dc 2024-12-11T04:28:59,644 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 697517215fc3b7180eb3ba48942407dc 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-11T04:28:59,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=A 2024-12-11T04:28:59,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:59,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=B 2024-12-11T04:28:59,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:59,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=C 2024-12-11T04:28:59,644 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:28:59,650 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211eaf4edfcd1b449aaae3aae4efdc89e4a_697517215fc3b7180eb3ba48942407dc is 50, key is test_row_0/A:col10/1733891338528/Put/seqid=0 2024-12-11T04:28:59,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742459_1635 (size=12304) 2024-12-11T04:28:59,654 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:28:59,656 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:59,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891399652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:59,658 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211eaf4edfcd1b449aaae3aae4efdc89e4a_697517215fc3b7180eb3ba48942407dc to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211eaf4edfcd1b449aaae3aae4efdc89e4a_697517215fc3b7180eb3ba48942407dc 2024-12-11T04:28:59,658 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:59,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891399655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:59,658 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:59,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891399656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:59,659 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/b464f9f8f0f14431b90241621277618f, store: [table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:28:59,659 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:59,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891399657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:59,659 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/b464f9f8f0f14431b90241621277618f is 175, key is test_row_0/A:col10/1733891338528/Put/seqid=0 2024-12-11T04:28:59,659 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:59,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891399657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:59,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742460_1636 (size=31105) 2024-12-11T04:28:59,759 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:59,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891399758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:59,761 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:59,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891399759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:59,761 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:59,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891399759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:59,762 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:59,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891399760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:59,763 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:59,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891399760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:59,820 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/163e5fb6b49f4604baf984068940d5f0 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/163e5fb6b49f4604baf984068940d5f0 2024-12-11T04:28:59,823 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 697517215fc3b7180eb3ba48942407dc/C of 697517215fc3b7180eb3ba48942407dc into 163e5fb6b49f4604baf984068940d5f0(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:28:59,823 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 697517215fc3b7180eb3ba48942407dc: 2024-12-11T04:28:59,823 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc., storeName=697517215fc3b7180eb3ba48942407dc/C, priority=13, startTime=1733891339372; duration=0sec 2024-12-11T04:28:59,823 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:28:59,823 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 697517215fc3b7180eb3ba48942407dc:C 2024-12-11T04:28:59,963 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:59,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891399962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:59,963 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:59,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891399962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:59,963 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:59,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891399962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:59,965 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:59,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891399964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:28:59,965 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:28:59,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891399964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:00,064 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=197, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/b464f9f8f0f14431b90241621277618f 2024-12-11T04:29:00,070 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/71ae609f101a4fdba092ce122874fc47 is 50, key is test_row_0/B:col10/1733891338528/Put/seqid=0 2024-12-11T04:29:00,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742461_1637 (size=12151) 2024-12-11T04:29:00,074 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/71ae609f101a4fdba092ce122874fc47 2024-12-11T04:29:00,096 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/060d748b59f141e8b93ba548cf5cc1d8 is 50, key is test_row_0/C:col10/1733891338528/Put/seqid=0 2024-12-11T04:29:00,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742462_1638 (size=12151) 2024-12-11T04:29:00,100 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/060d748b59f141e8b93ba548cf5cc1d8 2024-12-11T04:29:00,106 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/b464f9f8f0f14431b90241621277618f as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/b464f9f8f0f14431b90241621277618f 2024-12-11T04:29:00,113 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/b464f9f8f0f14431b90241621277618f, entries=150, sequenceid=197, filesize=30.4 K 2024-12-11T04:29:00,114 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/71ae609f101a4fdba092ce122874fc47 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/71ae609f101a4fdba092ce122874fc47 2024-12-11T04:29:00,117 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/71ae609f101a4fdba092ce122874fc47, entries=150, sequenceid=197, filesize=11.9 K 2024-12-11T04:29:00,118 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/060d748b59f141e8b93ba548cf5cc1d8 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/060d748b59f141e8b93ba548cf5cc1d8 2024-12-11T04:29:00,121 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/060d748b59f141e8b93ba548cf5cc1d8, entries=150, sequenceid=197, filesize=11.9 K 2024-12-11T04:29:00,122 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=93.93 KB/96180 for 697517215fc3b7180eb3ba48942407dc in 478ms, sequenceid=197, compaction requested=false 2024-12-11T04:29:00,122 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 697517215fc3b7180eb3ba48942407dc: 2024-12-11T04:29:00,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-11T04:29:00,191 INFO [Thread-2578 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 179 completed 2024-12-11T04:29:00,192 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] 
master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:29:00,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=181, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=181, table=TestAcidGuarantees 2024-12-11T04:29:00,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-12-11T04:29:00,193 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=181, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=181, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:29:00,194 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=181, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=181, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:29:00,194 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=182, ppid=181, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:29:00,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 697517215fc3b7180eb3ba48942407dc 2024-12-11T04:29:00,266 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 697517215fc3b7180eb3ba48942407dc 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-11T04:29:00,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=A 2024-12-11T04:29:00,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:29:00,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=B 2024-12-11T04:29:00,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:29:00,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=C 2024-12-11T04:29:00,267 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:29:00,273 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211c95b49a128f8406ca73626d6731122b7_697517215fc3b7180eb3ba48942407dc is 50, key is test_row_0/A:col10/1733891340266/Put/seqid=0 2024-12-11T04:29:00,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742463_1639 (size=14794) 2024-12-11T04:29:00,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-12-11T04:29:00,310 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:00,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891400277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:00,312 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:00,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891400280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:00,313 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:00,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891400310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:00,313 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:00,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891400310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:00,314 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:00,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891400310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:00,345 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:29:00,346 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-12-11T04:29:00,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:29:00,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. as already flushing 2024-12-11T04:29:00,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:29:00,346 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
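The repeated RegionTooBusyException entries above show Mutate calls being rejected while the region's memstore sits over its 512 K blocking limit during the flush. Below is a minimal client-side sketch of tolerating such rejections; it assumes the public HBase client API, the table, family and row names are taken from the log (TestAcidGuarantees, A, test_row_0), and the retry settings are illustrative rather than the values this test uses.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionWriteSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Let the client ride out transient RegionTooBusyException responses by
            // retrying with backoff (illustrative values, not the test's settings).
            conf.setInt("hbase.client.retries.number", 15);
            conf.setLong("hbase.client.pause", 100); // base backoff in milliseconds

            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                try {
                    table.put(put); // retried internally before any exception surfaces
                } catch (IOException e) {
                    // Once retries are exhausted the RegionTooBusyException typically
                    // surfaces wrapped in a retries-exhausted exception; defer or log
                    // the write at the application level instead of failing hard.
                    System.err.println("Region still too busy: " + e);
                }
            }
        }
    }

The pattern in the log, where the same connections (59714, 59744, 59734, 59662, 59706) come back with new callIds after each rejection, is consistent with writers that simply keep retrying in this way.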
2024-12-11T04:29:00,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:29:00,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:29:00,413 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:00,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891400411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:00,416 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:00,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891400414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:00,416 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:00,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891400414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:00,416 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:00,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891400414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:00,416 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:00,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891400415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:00,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-12-11T04:29:00,498 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:29:00,499 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-12-11T04:29:00,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:29:00,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. as already flushing 2024-12-11T04:29:00,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:29:00,499 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
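The 512.0 K figure in those rejections is the region's memstore blocking limit: writes are refused once the memstore exceeds roughly the configured flush size multiplied by the block multiplier, and resume once a flush brings it back under the limit. A small sketch of the two configuration knobs involved follows; the fallback values shown are the usual production defaults (an assumption), and the tiny 512 K limit in this run presumably comes from the test lowering the flush size, a setting not visible in this excerpt.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimitSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();

            // Fallbacks are the common production defaults (128 MB flush size,
            // multiplier of 4); the 512 K limit in the log implies much smaller test settings.
            long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 134217728L);
            long blockMultiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);

            // A region starts rejecting writes with RegionTooBusyException once its
            // memstore exceeds roughly flushSize * blockMultiplier.
            System.out.println("Per-region memstore blocking limit ~= "
                + (flushSize * blockMultiplier) + " bytes");
        }
    }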
2024-12-11T04:29:00,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:29:00,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:29:00,615 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:00,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891400614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:00,619 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:00,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891400617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:00,620 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:00,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891400618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:00,620 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:00,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891400618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:00,622 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:00,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891400619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:00,651 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:29:00,652 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-12-11T04:29:00,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:29:00,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. as already flushing 2024-12-11T04:29:00,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:29:00,652 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
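The master-side lines above (FlushTableProcedure pid=181 and its FlushRegionProcedure child pid=182, which keeps failing with "Unable to complete flush ... as already flushing" and being re-dispatched) correspond to a client-requested table flush like the one that completed earlier as procId 179. A minimal sketch of issuing that request through the public Admin API follows; the test harness may well go through its own helper rather than exactly this call.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Asks the master to run a flush procedure for every region of the
                // table; if a region is already flushing (as in the log above), the
                // per-region subprocedure fails and the master dispatches it again.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }

That re-dispatch loop is why pid=182 reappears above: it can only succeed once the flush already in progress on 697517215fc3b7180eb3ba48942407dc finishes.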
2024-12-11T04:29:00,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:29:00,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:29:00,677 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:29:00,680 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211c95b49a128f8406ca73626d6731122b7_697517215fc3b7180eb3ba48942407dc to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211c95b49a128f8406ca73626d6731122b7_697517215fc3b7180eb3ba48942407dc 2024-12-11T04:29:00,681 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/20b6a715b331436b903b7ce476a20cdf, store: [table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:29:00,682 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/20b6a715b331436b903b7ce476a20cdf is 175, key is test_row_0/A:col10/1733891340266/Put/seqid=0 2024-12-11T04:29:00,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742464_1640 (size=39749) 2024-12-11T04:29:00,692 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=217, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/20b6a715b331436b903b7ce476a20cdf 2024-12-11T04:29:00,699 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/bb57b6c9597a4fc899e2b6606b8ee703 is 50, key is test_row_0/B:col10/1733891340266/Put/seqid=0 2024-12-11T04:29:00,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742465_1641 (size=12151) 2024-12-11T04:29:00,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-12-11T04:29:00,804 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:29:00,805 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-12-11T04:29:00,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region 
operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:29:00,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. as already flushing 2024-12-11T04:29:00,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:29:00,805 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:29:00,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
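A little above, the flush goes through HMobStore and DefaultMobStoreFlusher and renames a flushed file under mobdir/data/.../A/, which indicates that column family A is MOB-enabled in this test schema. For reference, here is a sketch of declaring such a family with the standard descriptor builders; the 10 KB threshold is illustrative, and the test's actual schema setup is not shown in this excerpt.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobFamilySketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Values larger than the MOB threshold are written to separate MOB
                // files under /mobdir instead of ordinary store files.
                TableDescriptorBuilder table =
                    TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                            .setMobEnabled(true)
                            .setMobThreshold(10 * 1024) // illustrative threshold
                            .build())
                        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"))
                        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"));
                admin.createTable(table.build());
            }
        }
    }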
2024-12-11T04:29:00,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:29:00,918 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:00,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891400917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:00,923 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:00,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891400922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:00,923 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:00,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891400922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:00,924 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:00,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891400923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:00,925 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:00,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891400924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:00,958 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:29:00,958 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-12-11T04:29:00,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:29:00,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. as already flushing 2024-12-11T04:29:00,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:29:00,959 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:29:00,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:29:00,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:29:01,103 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=217 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/bb57b6c9597a4fc899e2b6606b8ee703 2024-12-11T04:29:01,109 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/4424d7babfcc4931a7095b951bbaca15 is 50, key is test_row_0/C:col10/1733891340266/Put/seqid=0 2024-12-11T04:29:01,110 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:29:01,110 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-12-11T04:29:01,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:29:01,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. as already flushing 2024-12-11T04:29:01,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 
2024-12-11T04:29:01,111 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:29:01,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:29:01,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:29:01,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742466_1642 (size=12151) 2024-12-11T04:29:01,113 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=217 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/4424d7babfcc4931a7095b951bbaca15 2024-12-11T04:29:01,117 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/20b6a715b331436b903b7ce476a20cdf as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/20b6a715b331436b903b7ce476a20cdf 2024-12-11T04:29:01,120 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/20b6a715b331436b903b7ce476a20cdf, entries=200, sequenceid=217, filesize=38.8 K 2024-12-11T04:29:01,121 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/bb57b6c9597a4fc899e2b6606b8ee703 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/bb57b6c9597a4fc899e2b6606b8ee703 2024-12-11T04:29:01,124 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/bb57b6c9597a4fc899e2b6606b8ee703, entries=150, sequenceid=217, filesize=11.9 K 2024-12-11T04:29:01,124 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/4424d7babfcc4931a7095b951bbaca15 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/4424d7babfcc4931a7095b951bbaca15 2024-12-11T04:29:01,127 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/4424d7babfcc4931a7095b951bbaca15, entries=150, sequenceid=217, filesize=11.9 K 2024-12-11T04:29:01,128 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=100.63 KB/103050 for 697517215fc3b7180eb3ba48942407dc in 862ms, sequenceid=217, compaction requested=true 2024-12-11T04:29:01,128 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 697517215fc3b7180eb3ba48942407dc: 2024-12-11T04:29:01,128 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 697517215fc3b7180eb3ba48942407dc:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:29:01,128 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:29:01,128 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 697517215fc3b7180eb3ba48942407dc:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:29:01,128 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:29:01,128 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:29:01,128 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 697517215fc3b7180eb3ba48942407dc:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:29:01,128 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:29:01,128 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:29:01,129 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102369 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:29:01,129 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 
3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:29:01,129 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): 697517215fc3b7180eb3ba48942407dc/A is initiating minor compaction (all files) 2024-12-11T04:29:01,129 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): 697517215fc3b7180eb3ba48942407dc/B is initiating minor compaction (all files) 2024-12-11T04:29:01,129 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 697517215fc3b7180eb3ba48942407dc/A in TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:29:01,129 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 697517215fc3b7180eb3ba48942407dc/B in TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:29:01,129 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/03a9456292c7433894ebd0989860dd67, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/71ae609f101a4fdba092ce122874fc47, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/bb57b6c9597a4fc899e2b6606b8ee703] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp, totalSize=36.0 K 2024-12-11T04:29:01,129 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/d659c59b869049269b7665607289f0fe, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/b464f9f8f0f14431b90241621277618f, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/20b6a715b331436b903b7ce476a20cdf] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp, totalSize=100.0 K 2024-12-11T04:29:01,129 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:29:01,129 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 
files: [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/d659c59b869049269b7665607289f0fe, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/b464f9f8f0f14431b90241621277618f, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/20b6a715b331436b903b7ce476a20cdf] 2024-12-11T04:29:01,130 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting d659c59b869049269b7665607289f0fe, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733891338206 2024-12-11T04:29:01,130 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 03a9456292c7433894ebd0989860dd67, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733891338206 2024-12-11T04:29:01,130 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting b464f9f8f0f14431b90241621277618f, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1733891338523 2024-12-11T04:29:01,130 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 71ae609f101a4fdba092ce122874fc47, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1733891338523 2024-12-11T04:29:01,130 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting bb57b6c9597a4fc899e2b6606b8ee703, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1733891339651 2024-12-11T04:29:01,130 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 20b6a715b331436b903b7ce476a20cdf, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1733891339651 2024-12-11T04:29:01,136 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 697517215fc3b7180eb3ba48942407dc#B#compaction#545 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:29:01,137 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/8f8f7a9f4eab4436a83ae5796a556ec7 is 50, key is test_row_0/B:col10/1733891340266/Put/seqid=0 2024-12-11T04:29:01,138 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:29:01,149 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241211cb4133569a934dd7bdfadece8ece67d8_697517215fc3b7180eb3ba48942407dc store=[table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:29:01,151 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241211cb4133569a934dd7bdfadece8ece67d8_697517215fc3b7180eb3ba48942407dc, store=[table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:29:01,152 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211cb4133569a934dd7bdfadece8ece67d8_697517215fc3b7180eb3ba48942407dc because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:29:01,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742467_1643 (size=12663) 2024-12-11T04:29:01,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742468_1644 (size=4469) 2024-12-11T04:29:01,159 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 697517215fc3b7180eb3ba48942407dc#A#compaction#546 average throughput is 1.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:29:01,160 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/9f420ff25c8d4d1c8eaec49e8ef73608 is 175, key is test_row_0/A:col10/1733891340266/Put/seqid=0 2024-12-11T04:29:01,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742469_1645 (size=31617) 2024-12-11T04:29:01,167 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/9f420ff25c8d4d1c8eaec49e8ef73608 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/9f420ff25c8d4d1c8eaec49e8ef73608 2024-12-11T04:29:01,171 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 697517215fc3b7180eb3ba48942407dc/A of 697517215fc3b7180eb3ba48942407dc into 9f420ff25c8d4d1c8eaec49e8ef73608(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:29:01,171 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 697517215fc3b7180eb3ba48942407dc: 2024-12-11T04:29:01,171 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc., storeName=697517215fc3b7180eb3ba48942407dc/A, priority=13, startTime=1733891341128; duration=0sec 2024-12-11T04:29:01,171 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:29:01,171 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 697517215fc3b7180eb3ba48942407dc:A 2024-12-11T04:29:01,171 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:29:01,172 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:29:01,172 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): 697517215fc3b7180eb3ba48942407dc/C is initiating minor compaction (all files) 2024-12-11T04:29:01,172 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 697517215fc3b7180eb3ba48942407dc/C in TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 
2024-12-11T04:29:01,172 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/163e5fb6b49f4604baf984068940d5f0, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/060d748b59f141e8b93ba548cf5cc1d8, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/4424d7babfcc4931a7095b951bbaca15] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp, totalSize=36.0 K 2024-12-11T04:29:01,172 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 163e5fb6b49f4604baf984068940d5f0, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1733891338206 2024-12-11T04:29:01,173 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 060d748b59f141e8b93ba548cf5cc1d8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1733891338523 2024-12-11T04:29:01,173 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4424d7babfcc4931a7095b951bbaca15, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1733891339651 2024-12-11T04:29:01,179 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 697517215fc3b7180eb3ba48942407dc#C#compaction#547 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:29:01,179 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/3ab70be3ba4c47be843af7b6f8a92d71 is 50, key is test_row_0/C:col10/1733891340266/Put/seqid=0 2024-12-11T04:29:01,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742470_1646 (size=12629) 2024-12-11T04:29:01,263 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:29:01,263 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-12-11T04:29:01,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 
2024-12-11T04:29:01,263 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2837): Flushing 697517215fc3b7180eb3ba48942407dc 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-11T04:29:01,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=A 2024-12-11T04:29:01,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:29:01,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=B 2024-12-11T04:29:01,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:29:01,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=C 2024-12-11T04:29:01,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:29:01,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121123d166a1551248f6a210c5d36b899ffb_697517215fc3b7180eb3ba48942407dc is 50, key is test_row_0/A:col10/1733891340280/Put/seqid=0 2024-12-11T04:29:01,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742471_1647 (size=12304) 2024-12-11T04:29:01,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:29:01,276 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121123d166a1551248f6a210c5d36b899ffb_697517215fc3b7180eb3ba48942407dc to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121123d166a1551248f6a210c5d36b899ffb_697517215fc3b7180eb3ba48942407dc 2024-12-11T04:29:01,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/727961c027934a9caf0cc75c68c4b1c0, store: [table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:29:01,278 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/727961c027934a9caf0cc75c68c4b1c0 is 175, key is test_row_0/A:col10/1733891340280/Put/seqid=0 2024-12-11T04:29:01,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742472_1648 (size=31105) 2024-12-11T04:29:01,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-12-11T04:29:01,426 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. as already flushing 2024-12-11T04:29:01,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 697517215fc3b7180eb3ba48942407dc 2024-12-11T04:29:01,438 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:01,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891401436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:01,439 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:01,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891401436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:01,439 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:01,439 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:01,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891401436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:01,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891401436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:01,439 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:01,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891401437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:01,540 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:01,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891401539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:01,541 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:01,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891401540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:01,541 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:01,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891401540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:01,541 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:01,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891401540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:01,542 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:01,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891401540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:01,562 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/8f8f7a9f4eab4436a83ae5796a556ec7 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/8f8f7a9f4eab4436a83ae5796a556ec7 2024-12-11T04:29:01,566 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 697517215fc3b7180eb3ba48942407dc/B of 697517215fc3b7180eb3ba48942407dc into 8f8f7a9f4eab4436a83ae5796a556ec7(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:29:01,566 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 697517215fc3b7180eb3ba48942407dc: 2024-12-11T04:29:01,566 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc., storeName=697517215fc3b7180eb3ba48942407dc/B, priority=13, startTime=1733891341128; duration=0sec 2024-12-11T04:29:01,566 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:29:01,566 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 697517215fc3b7180eb3ba48942407dc:B 2024-12-11T04:29:01,586 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/3ab70be3ba4c47be843af7b6f8a92d71 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/3ab70be3ba4c47be843af7b6f8a92d71 2024-12-11T04:29:01,589 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 697517215fc3b7180eb3ba48942407dc/C of 697517215fc3b7180eb3ba48942407dc into 3ab70be3ba4c47be843af7b6f8a92d71(size=12.3 K), total size for store is 12.3 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:29:01,589 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 697517215fc3b7180eb3ba48942407dc: 2024-12-11T04:29:01,589 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc., storeName=697517215fc3b7180eb3ba48942407dc/C, priority=13, startTime=1733891341128; duration=0sec 2024-12-11T04:29:01,589 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:29:01,589 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 697517215fc3b7180eb3ba48942407dc:C 2024-12-11T04:29:01,694 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=236, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/727961c027934a9caf0cc75c68c4b1c0 2024-12-11T04:29:01,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/642002e7a94b406cb9ec71899616eb13 is 50, key is test_row_0/B:col10/1733891340280/Put/seqid=0 2024-12-11T04:29:01,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742473_1649 (size=12151) 2024-12-11T04:29:01,743 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:01,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891401742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:01,743 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:01,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891401742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:01,743 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:01,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891401742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:01,743 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:01,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891401743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:01,745 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:01,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891401743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:02,046 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:02,046 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:02,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891402044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:02,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891402044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:02,046 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:02,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891402045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:02,047 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:02,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891402045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:02,049 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:02,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891402047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:02,104 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/642002e7a94b406cb9ec71899616eb13 2024-12-11T04:29:02,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/52200592c22c4af2be61a8cd39b8709d is 50, key is test_row_0/C:col10/1733891340280/Put/seqid=0 2024-12-11T04:29:02,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742474_1650 (size=12151) 2024-12-11T04:29:02,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-12-11T04:29:02,514 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/52200592c22c4af2be61a8cd39b8709d 2024-12-11T04:29:02,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/727961c027934a9caf0cc75c68c4b1c0 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/727961c027934a9caf0cc75c68c4b1c0 2024-12-11T04:29:02,522 INFO 
[RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/727961c027934a9caf0cc75c68c4b1c0, entries=150, sequenceid=236, filesize=30.4 K 2024-12-11T04:29:02,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/642002e7a94b406cb9ec71899616eb13 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/642002e7a94b406cb9ec71899616eb13 2024-12-11T04:29:02,526 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/642002e7a94b406cb9ec71899616eb13, entries=150, sequenceid=236, filesize=11.9 K 2024-12-11T04:29:02,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/52200592c22c4af2be61a8cd39b8709d as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/52200592c22c4af2be61a8cd39b8709d 2024-12-11T04:29:02,530 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/52200592c22c4af2be61a8cd39b8709d, entries=150, sequenceid=236, filesize=11.9 K 2024-12-11T04:29:02,530 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 697517215fc3b7180eb3ba48942407dc in 1267ms, sequenceid=236, compaction requested=false 2024-12-11T04:29:02,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2538): Flush status journal for 697517215fc3b7180eb3ba48942407dc: 2024-12-11T04:29:02,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 
2024-12-11T04:29:02,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=182 2024-12-11T04:29:02,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=182 2024-12-11T04:29:02,533 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=182, resume processing ppid=181 2024-12-11T04:29:02,533 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=182, ppid=181, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3370 sec 2024-12-11T04:29:02,534 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=181, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=181, table=TestAcidGuarantees in 2.3410 sec 2024-12-11T04:29:02,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 697517215fc3b7180eb3ba48942407dc 2024-12-11T04:29:02,551 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 697517215fc3b7180eb3ba48942407dc 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-11T04:29:02,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=A 2024-12-11T04:29:02,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:29:02,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=B 2024-12-11T04:29:02,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:29:02,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=C 2024-12-11T04:29:02,552 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:29:02,558 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211c8fb818a5e154f63b07c05e84d2293a0_697517215fc3b7180eb3ba48942407dc is 50, key is test_row_0/A:col10/1733891342550/Put/seqid=0 2024-12-11T04:29:02,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742475_1651 (size=17384) 2024-12-11T04:29:02,563 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:02,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891402560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:02,564 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:02,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891402562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:02,565 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:02,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891402563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:02,565 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:02,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891402563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:02,565 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:02,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891402563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:02,665 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:02,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891402664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:02,665 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:02,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891402665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:02,667 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:02,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891402666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:02,668 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:02,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891402666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:02,668 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:02,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891402666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:02,868 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:02,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891402866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:02,869 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:02,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891402867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:02,869 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:02,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891402868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:02,872 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:02,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891402870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:02,872 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:02,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891402870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:02,962 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:29:02,965 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211c8fb818a5e154f63b07c05e84d2293a0_697517215fc3b7180eb3ba48942407dc to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211c8fb818a5e154f63b07c05e84d2293a0_697517215fc3b7180eb3ba48942407dc 2024-12-11T04:29:02,965 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/6636792ed9b040e0a7ba307d1d878185, store: [table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:29:02,966 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/6636792ed9b040e0a7ba307d1d878185 is 175, key is test_row_0/A:col10/1733891342550/Put/seqid=0 2024-12-11T04:29:02,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742476_1652 (size=48489) 2024-12-11T04:29:03,171 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:03,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891403169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:03,173 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:03,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891403171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:03,174 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:03,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891403172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:03,175 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:03,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891403174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:03,176 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:03,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891403175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:03,370 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=258, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/6636792ed9b040e0a7ba307d1d878185 2024-12-11T04:29:03,377 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/84ab138c71314f5483f169a27c617559 is 50, key is test_row_0/B:col10/1733891342550/Put/seqid=0 2024-12-11T04:29:03,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742477_1653 (size=12251) 2024-12-11T04:29:03,381 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=258 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/84ab138c71314f5483f169a27c617559 2024-12-11T04:29:03,387 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/454d54f596a44f61922125b4a453e689 is 50, key is test_row_0/C:col10/1733891342550/Put/seqid=0 2024-12-11T04:29:03,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742478_1654 (size=12251) 2024-12-11T04:29:03,674 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:03,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891403673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:03,676 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:03,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891403674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:03,677 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:03,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891403675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:03,679 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:03,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891403678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:03,682 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:03,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891403681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:03,791 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=258 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/454d54f596a44f61922125b4a453e689 2024-12-11T04:29:03,795 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/6636792ed9b040e0a7ba307d1d878185 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/6636792ed9b040e0a7ba307d1d878185 2024-12-11T04:29:03,798 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/6636792ed9b040e0a7ba307d1d878185, entries=250, sequenceid=258, filesize=47.4 K 2024-12-11T04:29:03,799 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/84ab138c71314f5483f169a27c617559 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/84ab138c71314f5483f169a27c617559 2024-12-11T04:29:03,802 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/84ab138c71314f5483f169a27c617559, entries=150, sequenceid=258, filesize=12.0 K 2024-12-11T04:29:03,803 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/454d54f596a44f61922125b4a453e689 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/454d54f596a44f61922125b4a453e689 2024-12-11T04:29:03,806 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/454d54f596a44f61922125b4a453e689, entries=150, sequenceid=258, filesize=12.0 K 2024-12-11T04:29:03,807 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=93.93 KB/96180 for 697517215fc3b7180eb3ba48942407dc in 1256ms, sequenceid=258, compaction requested=true 2024-12-11T04:29:03,807 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 697517215fc3b7180eb3ba48942407dc: 2024-12-11T04:29:03,807 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 697517215fc3b7180eb3ba48942407dc:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:29:03,807 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:29:03,807 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 697517215fc3b7180eb3ba48942407dc:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:29:03,807 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:29:03,807 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:29:03,807 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 697517215fc3b7180eb3ba48942407dc:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:29:03,807 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:29:03,807 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:29:03,808 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 111211 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:29:03,808 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 
3 files of size 37065 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:29:03,808 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): 697517215fc3b7180eb3ba48942407dc/B is initiating minor compaction (all files) 2024-12-11T04:29:03,808 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): 697517215fc3b7180eb3ba48942407dc/A is initiating minor compaction (all files) 2024-12-11T04:29:03,808 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 697517215fc3b7180eb3ba48942407dc/B in TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:29:03,808 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 697517215fc3b7180eb3ba48942407dc/A in TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:29:03,808 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/8f8f7a9f4eab4436a83ae5796a556ec7, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/642002e7a94b406cb9ec71899616eb13, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/84ab138c71314f5483f169a27c617559] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp, totalSize=36.2 K 2024-12-11T04:29:03,808 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/9f420ff25c8d4d1c8eaec49e8ef73608, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/727961c027934a9caf0cc75c68c4b1c0, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/6636792ed9b040e0a7ba307d1d878185] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp, totalSize=108.6 K 2024-12-11T04:29:03,809 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:29:03,809 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 
files: [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/9f420ff25c8d4d1c8eaec49e8ef73608, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/727961c027934a9caf0cc75c68c4b1c0, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/6636792ed9b040e0a7ba307d1d878185] 2024-12-11T04:29:03,809 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 8f8f7a9f4eab4436a83ae5796a556ec7, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1733891339651 2024-12-11T04:29:03,809 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9f420ff25c8d4d1c8eaec49e8ef73608, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1733891339651 2024-12-11T04:29:03,809 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 642002e7a94b406cb9ec71899616eb13, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1733891340274 2024-12-11T04:29:03,809 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 727961c027934a9caf0cc75c68c4b1c0, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1733891340274 2024-12-11T04:29:03,809 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 84ab138c71314f5483f169a27c617559, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1733891341435 2024-12-11T04:29:03,810 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6636792ed9b040e0a7ba307d1d878185, keycount=250, bloomtype=ROW, size=47.4 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1733891341434 2024-12-11T04:29:03,815 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:29:03,816 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 697517215fc3b7180eb3ba48942407dc#B#compaction#554 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:29:03,816 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/6074867de7e0466c82ef815733454af8 is 50, key is test_row_0/B:col10/1733891342550/Put/seqid=0 2024-12-11T04:29:03,817 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241211b7a5a6d202c542a79f87afa7f67e2ada_697517215fc3b7180eb3ba48942407dc store=[table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:29:03,819 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241211b7a5a6d202c542a79f87afa7f67e2ada_697517215fc3b7180eb3ba48942407dc, store=[table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:29:03,819 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211b7a5a6d202c542a79f87afa7f67e2ada_697517215fc3b7180eb3ba48942407dc because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:29:03,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742479_1655 (size=12865) 2024-12-11T04:29:03,826 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/6074867de7e0466c82ef815733454af8 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/6074867de7e0466c82ef815733454af8 2024-12-11T04:29:03,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742480_1656 (size=4469) 2024-12-11T04:29:03,832 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 697517215fc3b7180eb3ba48942407dc#A#compaction#555 average throughput is 1.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:29:03,833 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/058e752e1aeb43a78cb7a02671f92eaa is 175, key is test_row_0/A:col10/1733891342550/Put/seqid=0 2024-12-11T04:29:03,833 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 697517215fc3b7180eb3ba48942407dc/B of 697517215fc3b7180eb3ba48942407dc into 6074867de7e0466c82ef815733454af8(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T04:29:03,833 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 697517215fc3b7180eb3ba48942407dc: 2024-12-11T04:29:03,833 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc., storeName=697517215fc3b7180eb3ba48942407dc/B, priority=13, startTime=1733891343807; duration=0sec 2024-12-11T04:29:03,833 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:29:03,833 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 697517215fc3b7180eb3ba48942407dc:B 2024-12-11T04:29:03,833 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:29:03,835 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37031 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:29:03,835 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): 697517215fc3b7180eb3ba48942407dc/C is initiating minor compaction (all files) 2024-12-11T04:29:03,835 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 697517215fc3b7180eb3ba48942407dc/C in TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:29:03,835 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/3ab70be3ba4c47be843af7b6f8a92d71, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/52200592c22c4af2be61a8cd39b8709d, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/454d54f596a44f61922125b4a453e689] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp, totalSize=36.2 K 2024-12-11T04:29:03,835 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 3ab70be3ba4c47be843af7b6f8a92d71, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1733891339651 2024-12-11T04:29:03,836 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 52200592c22c4af2be61a8cd39b8709d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=236, earliestPutTs=1733891340274 2024-12-11T04:29:03,836 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 454d54f596a44f61922125b4a453e689, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1733891341435 2024-12-11T04:29:03,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 
is added to blk_1073742481_1657 (size=31819) 2024-12-11T04:29:03,843 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 697517215fc3b7180eb3ba48942407dc#C#compaction#556 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:29:03,843 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/9b8b353f3b074ebc95aa7c7dda13f0de is 50, key is test_row_0/C:col10/1733891342550/Put/seqid=0 2024-12-11T04:29:03,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742482_1658 (size=12831) 2024-12-11T04:29:03,852 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/9b8b353f3b074ebc95aa7c7dda13f0de as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/9b8b353f3b074ebc95aa7c7dda13f0de 2024-12-11T04:29:03,857 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 697517215fc3b7180eb3ba48942407dc/C of 697517215fc3b7180eb3ba48942407dc into 9b8b353f3b074ebc95aa7c7dda13f0de(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:29:03,857 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 697517215fc3b7180eb3ba48942407dc: 2024-12-11T04:29:03,857 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc., storeName=697517215fc3b7180eb3ba48942407dc/C, priority=13, startTime=1733891343807; duration=0sec 2024-12-11T04:29:03,857 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:29:03,857 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 697517215fc3b7180eb3ba48942407dc:C 2024-12-11T04:29:04,256 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/058e752e1aeb43a78cb7a02671f92eaa as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/058e752e1aeb43a78cb7a02671f92eaa 2024-12-11T04:29:04,262 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 697517215fc3b7180eb3ba48942407dc/A of 697517215fc3b7180eb3ba48942407dc into 058e752e1aeb43a78cb7a02671f92eaa(size=31.1 K), total size for store is 31.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T04:29:04,262 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 697517215fc3b7180eb3ba48942407dc: 2024-12-11T04:29:04,262 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc., storeName=697517215fc3b7180eb3ba48942407dc/A, priority=13, startTime=1733891343807; duration=0sec 2024-12-11T04:29:04,262 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:29:04,262 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 697517215fc3b7180eb3ba48942407dc:A 2024-12-11T04:29:04,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-12-11T04:29:04,297 INFO [Thread-2578 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 181 completed 2024-12-11T04:29:04,299 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:29:04,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=183, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=183, table=TestAcidGuarantees 2024-12-11T04:29:04,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-12-11T04:29:04,300 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=183, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=183, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:29:04,301 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=183, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=183, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:29:04,301 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=184, ppid=183, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:29:04,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-12-11T04:29:04,452 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:29:04,453 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=184 2024-12-11T04:29:04,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 
2024-12-11T04:29:04,453 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegion(2837): Flushing 697517215fc3b7180eb3ba48942407dc 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-11T04:29:04,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=A 2024-12-11T04:29:04,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:29:04,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=B 2024-12-11T04:29:04,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:29:04,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=C 2024-12-11T04:29:04,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:29:04,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211d909c8734edd4f308ef5a174d48eb2ae_697517215fc3b7180eb3ba48942407dc is 50, key is test_row_0/A:col10/1733891342562/Put/seqid=0 2024-12-11T04:29:04,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742483_1659 (size=12454) 2024-12-11T04:29:04,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-12-11T04:29:04,682 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. as already flushing 2024-12-11T04:29:04,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 697517215fc3b7180eb3ba48942407dc 2024-12-11T04:29:04,694 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:04,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891404691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:04,694 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:04,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891404692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:04,695 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:04,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891404693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:04,695 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:04,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891404693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:04,695 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:04,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891404694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:04,796 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:04,796 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:04,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891404795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:04,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891404795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:04,797 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:04,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891404796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:04,797 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:04,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891404796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:04,798 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:04,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891404796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:04,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:29:04,867 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211d909c8734edd4f308ef5a174d48eb2ae_697517215fc3b7180eb3ba48942407dc to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211d909c8734edd4f308ef5a174d48eb2ae_697517215fc3b7180eb3ba48942407dc 2024-12-11T04:29:04,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/c66639df35be4e6ca4b11b7e972574e3, store: [table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:29:04,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/c66639df35be4e6ca4b11b7e972574e3 is 175, key is test_row_0/A:col10/1733891342562/Put/seqid=0 2024-12-11T04:29:04,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742484_1660 (size=31255) 2024-12-11T04:29:04,882 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=278, memsize=31.3 K, 
hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/c66639df35be4e6ca4b11b7e972574e3 2024-12-11T04:29:04,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/37cb900edac245909424deff4792285a is 50, key is test_row_0/B:col10/1733891342562/Put/seqid=0 2024-12-11T04:29:04,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742485_1661 (size=12301) 2024-12-11T04:29:04,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-12-11T04:29:04,998 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:04,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891404997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:04,998 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:04,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891404997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:05,000 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:05,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891404999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:05,000 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:05,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891404999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:05,001 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:05,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891404999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:05,294 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/37cb900edac245909424deff4792285a 2024-12-11T04:29:05,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/c876261693a24a90833d177aa4ffd4f4 is 50, key is test_row_0/C:col10/1733891342562/Put/seqid=0 2024-12-11T04:29:05,301 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:05,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891405301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:05,304 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:05,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891405302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:05,304 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:05,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891405303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:05,313 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:05,313 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:05,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891405312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:05,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891405312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:05,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742486_1662 (size=12301) 2024-12-11T04:29:05,326 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/c876261693a24a90833d177aa4ffd4f4 2024-12-11T04:29:05,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/c66639df35be4e6ca4b11b7e972574e3 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/c66639df35be4e6ca4b11b7e972574e3 2024-12-11T04:29:05,335 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/c66639df35be4e6ca4b11b7e972574e3, entries=150, sequenceid=278, filesize=30.5 K 2024-12-11T04:29:05,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/37cb900edac245909424deff4792285a as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/37cb900edac245909424deff4792285a 2024-12-11T04:29:05,339 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/37cb900edac245909424deff4792285a, entries=150, sequenceid=278, filesize=12.0 K 2024-12-11T04:29:05,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/c876261693a24a90833d177aa4ffd4f4 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/c876261693a24a90833d177aa4ffd4f4 2024-12-11T04:29:05,344 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/c876261693a24a90833d177aa4ffd4f4, entries=150, sequenceid=278, filesize=12.0 K 2024-12-11T04:29:05,345 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 697517215fc3b7180eb3ba48942407dc in 892ms, sequenceid=278, compaction requested=false 2024-12-11T04:29:05,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegion(2538): Flush status journal for 697517215fc3b7180eb3ba48942407dc: 2024-12-11T04:29:05,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 
2024-12-11T04:29:05,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=184 2024-12-11T04:29:05,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=184 2024-12-11T04:29:05,347 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=184, resume processing ppid=183 2024-12-11T04:29:05,347 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=184, ppid=183, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0450 sec 2024-12-11T04:29:05,349 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=183, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=183, table=TestAcidGuarantees in 1.0490 sec 2024-12-11T04:29:05,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-12-11T04:29:05,403 INFO [Thread-2578 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 183 completed 2024-12-11T04:29:05,404 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:29:05,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=185, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=185, table=TestAcidGuarantees 2024-12-11T04:29:05,406 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=185, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=185, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:29:05,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-12-11T04:29:05,406 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=185, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=185, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:29:05,406 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=186, ppid=185, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:29:05,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-12-11T04:29:05,558 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:29:05,558 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=186 2024-12-11T04:29:05,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 
2024-12-11T04:29:05,558 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegion(2837): Flushing 697517215fc3b7180eb3ba48942407dc 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-11T04:29:05,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=A 2024-12-11T04:29:05,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:29:05,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=B 2024-12-11T04:29:05,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:29:05,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=C 2024-12-11T04:29:05,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:29:05,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211079e059a7d534ee18dc9a56f4d1b039f_697517215fc3b7180eb3ba48942407dc is 50, key is test_row_0/A:col10/1733891344692/Put/seqid=0 2024-12-11T04:29:05,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742487_1663 (size=12454) 2024-12-11T04:29:05,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-12-11T04:29:05,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 697517215fc3b7180eb3ba48942407dc 2024-12-11T04:29:05,806 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. as already flushing 2024-12-11T04:29:05,821 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:05,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891405817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:05,821 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:05,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891405818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:05,821 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:05,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891405819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:05,822 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:05,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891405820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:05,824 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:05,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891405821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:05,924 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:05,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891405922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:05,924 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:05,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891405922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:05,924 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:05,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891405923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:05,926 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:05,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891405925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:05,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:29:05,976 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211079e059a7d534ee18dc9a56f4d1b039f_697517215fc3b7180eb3ba48942407dc to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211079e059a7d534ee18dc9a56f4d1b039f_697517215fc3b7180eb3ba48942407dc 2024-12-11T04:29:05,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/1f591a1b54f045009979577a36bc52c1, store: [table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:29:05,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/1f591a1b54f045009979577a36bc52c1 is 175, key is test_row_0/A:col10/1733891344692/Put/seqid=0 2024-12-11T04:29:05,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742488_1664 (size=31255) 2024-12-11T04:29:06,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-12-11T04:29:06,127 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:06,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891406126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:06,128 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:06,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891406126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:06,128 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:06,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891406126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:06,130 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:06,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891406128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:06,382 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=297, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/1f591a1b54f045009979577a36bc52c1 2024-12-11T04:29:06,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/99f9e53c646d457bb712ec11ad3bbc9d is 50, key is test_row_0/B:col10/1733891344692/Put/seqid=0 2024-12-11T04:29:06,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742489_1665 (size=12301) 2024-12-11T04:29:06,393 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=297 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/99f9e53c646d457bb712ec11ad3bbc9d 2024-12-11T04:29:06,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/22894d93d9aa49e9a335c6ae048b807a is 50, key is test_row_0/C:col10/1733891344692/Put/seqid=0 2024-12-11T04:29:06,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742490_1666 (size=12301) 2024-12-11T04:29:06,431 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:06,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891406429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:06,432 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:06,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891406430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:06,433 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
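
[editor's note] The repeated RegionTooBusyException records above originate in HRegion.checkResources(), which rejects writes while a region's memstore sits above its blocking threshold (the configured flush size multiplied by the block multiplier). The "Over memstore limit=512.0 K" value in these records suggests the test runs with a far smaller flush size than the production defaults. Below is a minimal sketch of how such a threshold could be configured for a test cluster; the concrete numbers are illustrative assumptions, not the settings this test actually uses.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreLimitConfig {
    public static Configuration smallMemStoreConf() {
        Configuration conf = HBaseConfiguration.create();
        // Flush a region's memstore once it reaches 128 KB (illustrative value;
        // the production default is 128 MB).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        // Block further writes with RegionTooBusyException once the memstore reaches
        // flush.size * multiplier, i.e. 512 KB here, matching the limit in the log.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        return conf;
    }
}
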
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:06,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891406431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:06,434 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:06,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891406432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:06,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-12-11T04:29:06,806 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=297 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/22894d93d9aa49e9a335c6ae048b807a 2024-12-11T04:29:06,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/1f591a1b54f045009979577a36bc52c1 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/1f591a1b54f045009979577a36bc52c1 2024-12-11T04:29:06,813 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/1f591a1b54f045009979577a36bc52c1, entries=150, sequenceid=297, filesize=30.5 K 2024-12-11T04:29:06,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/99f9e53c646d457bb712ec11ad3bbc9d as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/99f9e53c646d457bb712ec11ad3bbc9d 2024-12-11T04:29:06,817 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/99f9e53c646d457bb712ec11ad3bbc9d, entries=150, sequenceid=297, filesize=12.0 K 2024-12-11T04:29:06,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, 
pid=186}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/22894d93d9aa49e9a335c6ae048b807a as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/22894d93d9aa49e9a335c6ae048b807a 2024-12-11T04:29:06,820 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/22894d93d9aa49e9a335c6ae048b807a, entries=150, sequenceid=297, filesize=12.0 K 2024-12-11T04:29:06,821 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 697517215fc3b7180eb3ba48942407dc in 1263ms, sequenceid=297, compaction requested=true 2024-12-11T04:29:06,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.HRegion(2538): Flush status journal for 697517215fc3b7180eb3ba48942407dc: 2024-12-11T04:29:06,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:29:06,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=186 2024-12-11T04:29:06,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=186 2024-12-11T04:29:06,824 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=186, resume processing ppid=185 2024-12-11T04:29:06,824 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=186, ppid=185, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4160 sec 2024-12-11T04:29:06,825 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=185, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=185, table=TestAcidGuarantees in 1.4200 sec 2024-12-11T04:29:06,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 697517215fc3b7180eb3ba48942407dc 2024-12-11T04:29:06,827 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 697517215fc3b7180eb3ba48942407dc 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-11T04:29:06,827 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=A 2024-12-11T04:29:06,827 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:29:06,827 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=B 2024-12-11T04:29:06,827 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:29:06,827 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=C 2024-12-11T04:29:06,827 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:29:06,832 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211bac0d1bb4c644de1884185ff9a7ee196_697517215fc3b7180eb3ba48942407dc is 50, key is test_row_0/A:col10/1733891345819/Put/seqid=0 2024-12-11T04:29:06,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742491_1667 (size=14994) 2024-12-11T04:29:06,866 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:06,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891406864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:06,935 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:06,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891406934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:06,936 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:06,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891406934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:06,936 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:06,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891406934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:06,937 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:06,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891406936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:06,968 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
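
[editor's note] On the client side these rejections surface from Table.put() as RegionTooBusyException (an IOException subclass), which the HBase client normally retries with backoff before giving up. The sketch below shows one way a caller might handle it explicitly while the flush seen in the surrounding records catches up; the table and column names mirror the test's schema, but the retry policy is an assumption for illustration only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            int attempts = 0;
            while (true) {
                try {
                    table.put(put);
                    break; // write accepted
                } catch (RegionTooBusyException e) {
                    // Memstore above its blocking limit; back off and retry (illustrative policy).
                    if (++attempts >= 5) {
                        throw e;
                    }
                    Thread.sleep(100L * attempts);
                }
            }
        }
    }
}
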
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:06,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891406967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:07,172 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:07,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891407170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:07,236 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:29:07,239 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211bac0d1bb4c644de1884185ff9a7ee196_697517215fc3b7180eb3ba48942407dc to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211bac0d1bb4c644de1884185ff9a7ee196_697517215fc3b7180eb3ba48942407dc 2024-12-11T04:29:07,240 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/323d170a990e4cb6a1ec923013e6566f, store: [table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:29:07,241 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/323d170a990e4cb6a1ec923013e6566f is 175, key is test_row_0/A:col10/1733891345819/Put/seqid=0 2024-12-11T04:29:07,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742492_1668 (size=39949) 2024-12-11T04:29:07,476 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:07,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891407475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:07,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-12-11T04:29:07,510 INFO [Thread-2578 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 185 completed 2024-12-11T04:29:07,512 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-11T04:29:07,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=187, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=187, table=TestAcidGuarantees 2024-12-11T04:29:07,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=187 2024-12-11T04:29:07,513 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=187, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=187, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-11T04:29:07,514 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=187, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=187, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T04:29:07,514 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=188, ppid=187, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T04:29:07,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=187 2024-12-11T04:29:07,645 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=315, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/323d170a990e4cb6a1ec923013e6566f 2024-12-11T04:29:07,653 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/d41910f7e8f6479596d75c04ab9c251f is 50, key is test_row_0/B:col10/1733891345819/Put/seqid=0 2024-12-11T04:29:07,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742493_1669 (size=12301) 2024-12-11T04:29:07,658 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/d41910f7e8f6479596d75c04ab9c251f 2024-12-11T04:29:07,664 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/c851067a5ce349afa795e6810444d576 is 50, key is test_row_0/C:col10/1733891345819/Put/seqid=0 2024-12-11T04:29:07,665 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:29:07,665 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188 2024-12-11T04:29:07,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:29:07,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. as already flushing 2024-12-11T04:29:07,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:29:07,666 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] handler.RSProcedureHandler(58): pid=188 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:29:07,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=188 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:29:07,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=188 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:29:07,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742494_1670 (size=12301) 2024-12-11T04:29:07,669 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/c851067a5ce349afa795e6810444d576 2024-12-11T04:29:07,673 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/323d170a990e4cb6a1ec923013e6566f as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/323d170a990e4cb6a1ec923013e6566f 2024-12-11T04:29:07,676 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/323d170a990e4cb6a1ec923013e6566f, entries=200, sequenceid=315, filesize=39.0 K 2024-12-11T04:29:07,677 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/d41910f7e8f6479596d75c04ab9c251f as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/d41910f7e8f6479596d75c04ab9c251f 2024-12-11T04:29:07,680 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/d41910f7e8f6479596d75c04ab9c251f, entries=150, sequenceid=315, filesize=12.0 K 2024-12-11T04:29:07,681 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/c851067a5ce349afa795e6810444d576 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/c851067a5ce349afa795e6810444d576 2024-12-11T04:29:07,684 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/c851067a5ce349afa795e6810444d576, entries=150, sequenceid=315, filesize=12.0 K 2024-12-11T04:29:07,684 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 697517215fc3b7180eb3ba48942407dc in 858ms, sequenceid=315, compaction requested=true 2024-12-11T04:29:07,684 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal 
for 697517215fc3b7180eb3ba48942407dc: 2024-12-11T04:29:07,685 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 697517215fc3b7180eb3ba48942407dc:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:29:07,685 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:29:07,685 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 697517215fc3b7180eb3ba48942407dc:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:29:07,685 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:29:07,685 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T04:29:07,685 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 697517215fc3b7180eb3ba48942407dc:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:29:07,685 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T04:29:07,685 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:29:07,686 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49768 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T04:29:07,686 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 134278 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T04:29:07,686 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): 697517215fc3b7180eb3ba48942407dc/B is initiating minor compaction (all files) 2024-12-11T04:29:07,686 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): 697517215fc3b7180eb3ba48942407dc/A is initiating minor compaction (all files) 2024-12-11T04:29:07,686 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 697517215fc3b7180eb3ba48942407dc/B in TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:29:07,686 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 697517215fc3b7180eb3ba48942407dc/A in TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 
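The SortedCompactionPolicy / ExploringCompactionPolicy records above show stores A and B each selecting all four eligible HFiles for a minor compaction right after the flush. Such compactions are queued automatically by the flusher ("Small Compaction requested: system"), but they can also be requested explicitly; a hedged sketch against the public Admin API, with the table and column-family names taken from the log and the rest illustrative:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestCompactionExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            // Queue a minor compaction of column family A, comparable to the
            // system-requested one selected by the policy in the log.
            admin.compact(table, Bytes.toBytes("A"));
            // A major compaction instead rewrites all store files of the family.
            admin.majorCompact(table, Bytes.toBytes("A"));
        }
    }
}
```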
2024-12-11T04:29:07,686 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/6074867de7e0466c82ef815733454af8, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/37cb900edac245909424deff4792285a, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/99f9e53c646d457bb712ec11ad3bbc9d, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/d41910f7e8f6479596d75c04ab9c251f] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp, totalSize=48.6 K 2024-12-11T04:29:07,686 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/058e752e1aeb43a78cb7a02671f92eaa, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/c66639df35be4e6ca4b11b7e972574e3, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/1f591a1b54f045009979577a36bc52c1, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/323d170a990e4cb6a1ec923013e6566f] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp, totalSize=131.1 K 2024-12-11T04:29:07,686 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:29:07,686 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 
files: [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/058e752e1aeb43a78cb7a02671f92eaa, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/c66639df35be4e6ca4b11b7e972574e3, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/1f591a1b54f045009979577a36bc52c1, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/323d170a990e4cb6a1ec923013e6566f] 2024-12-11T04:29:07,686 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 6074867de7e0466c82ef815733454af8, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1733891341435 2024-12-11T04:29:07,686 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 058e752e1aeb43a78cb7a02671f92eaa, keycount=150, bloomtype=ROW, size=31.1 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1733891341435 2024-12-11T04:29:07,687 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 37cb900edac245909424deff4792285a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1733891342558 2024-12-11T04:29:07,687 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting c66639df35be4e6ca4b11b7e972574e3, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1733891342558 2024-12-11T04:29:07,687 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 99f9e53c646d457bb712ec11ad3bbc9d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1733891344692 2024-12-11T04:29:07,687 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1f591a1b54f045009979577a36bc52c1, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1733891344692 2024-12-11T04:29:07,687 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting d41910f7e8f6479596d75c04ab9c251f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1733891345816 2024-12-11T04:29:07,687 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 323d170a990e4cb6a1ec923013e6566f, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1733891345816 2024-12-11T04:29:07,694 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 697517215fc3b7180eb3ba48942407dc#B#compaction#566 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:29:07,694 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/ff1fdc5aef27492fb9d42764077295d3 is 50, key is test_row_0/B:col10/1733891345819/Put/seqid=0 2024-12-11T04:29:07,695 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:29:07,697 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412115d161b1ce239422784241abb80accd0b_697517215fc3b7180eb3ba48942407dc store=[table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:29:07,699 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412115d161b1ce239422784241abb80accd0b_697517215fc3b7180eb3ba48942407dc, store=[table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:29:07,699 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412115d161b1ce239422784241abb80accd0b_697517215fc3b7180eb3ba48942407dc because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:29:07,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742495_1671 (size=13051) 2024-12-11T04:29:07,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742496_1672 (size=4469) 2024-12-11T04:29:07,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=187 2024-12-11T04:29:07,818 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:29:07,818 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=188 2024-12-11T04:29:07,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 
2024-12-11T04:29:07,818 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2837): Flushing 697517215fc3b7180eb3ba48942407dc 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-11T04:29:07,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=A 2024-12-11T04:29:07,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:29:07,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=B 2024-12-11T04:29:07,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:29:07,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=C 2024-12-11T04:29:07,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:29:07,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412111f1b6a737a8442e7b66686d78e3f910a_697517215fc3b7180eb3ba48942407dc is 50, key is test_row_0/A:col10/1733891346857/Put/seqid=0 2024-12-11T04:29:07,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742497_1673 (size=12454) 2024-12-11T04:29:07,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 697517215fc3b7180eb3ba48942407dc 2024-12-11T04:29:07,939 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. as already flushing 2024-12-11T04:29:07,957 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:07,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891407954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:07,958 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:07,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891407954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:07,958 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:07,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891407955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:07,960 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:07,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891407957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:07,980 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:07,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891407979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:08,061 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:08,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891408059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:08,061 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:08,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891408059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:08,061 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:08,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891408059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:08,063 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:08,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891408061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:08,104 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 697517215fc3b7180eb3ba48942407dc#A#compaction#567 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:29:08,105 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/6996845c523341a49bbf3ee6054ef5ab is 175, key is test_row_0/A:col10/1733891345819/Put/seqid=0 2024-12-11T04:29:08,106 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/ff1fdc5aef27492fb9d42764077295d3 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/ff1fdc5aef27492fb9d42764077295d3 2024-12-11T04:29:08,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742498_1674 (size=32005) 2024-12-11T04:29:08,111 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 697517215fc3b7180eb3ba48942407dc/B of 697517215fc3b7180eb3ba48942407dc into ff1fdc5aef27492fb9d42764077295d3(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:29:08,111 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 697517215fc3b7180eb3ba48942407dc: 2024-12-11T04:29:08,111 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc., storeName=697517215fc3b7180eb3ba48942407dc/B, priority=12, startTime=1733891347685; duration=0sec 2024-12-11T04:29:08,111 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:29:08,111 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 697517215fc3b7180eb3ba48942407dc:B 2024-12-11T04:29:08,112 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-11T04:29:08,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=187 2024-12-11T04:29:08,121 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49734 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-11T04:29:08,121 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): 697517215fc3b7180eb3ba48942407dc/C is initiating minor compaction (all files) 2024-12-11T04:29:08,121 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 697517215fc3b7180eb3ba48942407dc/C in TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 
2024-12-11T04:29:08,121 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/9b8b353f3b074ebc95aa7c7dda13f0de, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/c876261693a24a90833d177aa4ffd4f4, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/22894d93d9aa49e9a335c6ae048b807a, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/c851067a5ce349afa795e6810444d576] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp, totalSize=48.6 K 2024-12-11T04:29:08,122 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/6996845c523341a49bbf3ee6054ef5ab as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/6996845c523341a49bbf3ee6054ef5ab 2024-12-11T04:29:08,122 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 9b8b353f3b074ebc95aa7c7dda13f0de, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1733891341435 2024-12-11T04:29:08,122 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting c876261693a24a90833d177aa4ffd4f4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1733891342558 2024-12-11T04:29:08,122 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 22894d93d9aa49e9a335c6ae048b807a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=297, earliestPutTs=1733891344692 2024-12-11T04:29:08,123 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting c851067a5ce349afa795e6810444d576, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1733891345816 2024-12-11T04:29:08,126 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 697517215fc3b7180eb3ba48942407dc/A of 697517215fc3b7180eb3ba48942407dc into 6996845c523341a49bbf3ee6054ef5ab(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-11T04:29:08,127 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 697517215fc3b7180eb3ba48942407dc: 2024-12-11T04:29:08,127 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc., storeName=697517215fc3b7180eb3ba48942407dc/A, priority=12, startTime=1733891347684; duration=0sec 2024-12-11T04:29:08,127 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:29:08,127 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 697517215fc3b7180eb3ba48942407dc:A 2024-12-11T04:29:08,132 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 697517215fc3b7180eb3ba48942407dc#C#compaction#569 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:29:08,133 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/098b3065a6ed46ca91be3a5fc5c267cd is 50, key is test_row_0/C:col10/1733891345819/Put/seqid=0 2024-12-11T04:29:08,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742499_1675 (size=13017) 2024-12-11T04:29:08,140 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/098b3065a6ed46ca91be3a5fc5c267cd as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/098b3065a6ed46ca91be3a5fc5c267cd 2024-12-11T04:29:08,143 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 697517215fc3b7180eb3ba48942407dc/C of 697517215fc3b7180eb3ba48942407dc into 098b3065a6ed46ca91be3a5fc5c267cd(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
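The RegionTooBusyException warnings that dominate the records below are the region server applying back-pressure: HRegion.checkResources rejects incoming puts once the region's memstore exceeds its blocking limit (512.0 K in this test) until the in-flight flush drains it. Clients generally treat this as a retryable condition; a minimal hedged sketch of that pattern follows, where the row, family and qualifier mirror the test data, the backoff values are illustrative, and note that the real HBase client usually retries this internally and may surface the failure wrapped rather than as a bare RegionTooBusyException:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryOnRegionTooBusy {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            int attempts = 0;
            while (true) {
                try {
                    table.put(put);  // may be rejected while the memstore is over its blocking limit
                    break;
                } catch (RegionTooBusyException e) {
                    // Illustrative handling only: back off briefly and retry,
                    // giving the flush time to bring the memstore back under the limit.
                    if (++attempts > 5) throw e;
                    Thread.sleep(100L * attempts);
                }
            }
        }
    }
}
```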
2024-12-11T04:29:08,144 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 697517215fc3b7180eb3ba48942407dc: 2024-12-11T04:29:08,144 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc., storeName=697517215fc3b7180eb3ba48942407dc/C, priority=12, startTime=1733891347685; duration=0sec 2024-12-11T04:29:08,144 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:29:08,144 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 697517215fc3b7180eb3ba48942407dc:C 2024-12-11T04:29:08,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:29:08,231 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412111f1b6a737a8442e7b66686d78e3f910a_697517215fc3b7180eb3ba48942407dc to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412111f1b6a737a8442e7b66686d78e3f910a_697517215fc3b7180eb3ba48942407dc 2024-12-11T04:29:08,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/fc5ad08c9d2e4e3a9f97d48aa0323a90, store: [table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:29:08,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/fc5ad08c9d2e4e3a9f97d48aa0323a90 is 175, key is test_row_0/A:col10/1733891346857/Put/seqid=0 2024-12-11T04:29:08,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742500_1676 (size=31255) 2024-12-11T04:29:08,263 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:08,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891408262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:08,263 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:08,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891408262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:08,264 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:08,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891408263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:08,266 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:29:08,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891408265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:29:08,566 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:29:08,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891408564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:29:08,567 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:29:08,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891408565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:29:08,569 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:29:08,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891408567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:29:08,569 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:29:08,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891408567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:29:08,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=187
2024-12-11T04:29:08,636 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=333, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/fc5ad08c9d2e4e3a9f97d48aa0323a90
2024-12-11T04:29:08,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/1684be0bbfd5442fa29496c7804ded8d is 50, key is test_row_0/B:col10/1733891346857/Put/seqid=0
2024-12-11T04:29:08,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742501_1677 (size=12301)
2024-12-11T04:29:08,646 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=333 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/1684be0bbfd5442fa29496c7804ded8d
2024-12-11T04:29:08,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/3c8c52a562264659ad3049dbf44de3c6 is 50, key is test_row_0/C:col10/1733891346857/Put/seqid=0
2024-12-11T04:29:08,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742502_1678 (size=12301)
2024-12-11T04:29:08,987 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:29:08,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59734 deadline: 1733891408986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:29:09,055 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=333 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/3c8c52a562264659ad3049dbf44de3c6
2024-12-11T04:29:09,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/fc5ad08c9d2e4e3a9f97d48aa0323a90 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/fc5ad08c9d2e4e3a9f97d48aa0323a90
2024-12-11T04:29:09,063 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/fc5ad08c9d2e4e3a9f97d48aa0323a90, entries=150, sequenceid=333, filesize=30.5 K
2024-12-11T04:29:09,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/1684be0bbfd5442fa29496c7804ded8d as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/1684be0bbfd5442fa29496c7804ded8d
2024-12-11T04:29:09,066 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/1684be0bbfd5442fa29496c7804ded8d, entries=150, sequenceid=333, filesize=12.0 K
2024-12-11T04:29:09,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/3c8c52a562264659ad3049dbf44de3c6 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/3c8c52a562264659ad3049dbf44de3c6
2024-12-11T04:29:09,069 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/3c8c52a562264659ad3049dbf44de3c6, entries=150, sequenceid=333, filesize=12.0 K
2024-12-11T04:29:09,070 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 697517215fc3b7180eb3ba48942407dc in 1252ms, sequenceid=333, compaction requested=false
2024-12-11T04:29:09,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.HRegion(2538): Flush status journal for 697517215fc3b7180eb3ba48942407dc:
2024-12-11T04:29:09,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.
2024-12-11T04:29:09,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=188}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=188
2024-12-11T04:29:09,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=188
2024-12-11T04:29:09,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 697517215fc3b7180eb3ba48942407dc
2024-12-11T04:29:09,072 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 697517215fc3b7180eb3ba48942407dc 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB
2024-12-11T04:29:09,073 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=188, resume processing ppid=187
2024-12-11T04:29:09,073 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=188, ppid=187, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5570 sec
2024-12-11T04:29:09,073 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=A
2024-12-11T04:29:09,073 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-11T04:29:09,073 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=B
2024-12-11T04:29:09,074 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-11T04:29:09,074 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=C
2024-12-11T04:29:09,074 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-11T04:29:09,075 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=187, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=187, table=TestAcidGuarantees in 1.5620 sec
2024-12-11T04:29:09,079 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412117da59b9319db4252920a655b76845969_697517215fc3b7180eb3ba48942407dc is 50, key is test_row_0/A:col10/1733891349072/Put/seqid=0
2024-12-11T04:29:09,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742503_1679 (size=12454)
2024-12-11T04:29:09,088 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:29:09,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891409086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:29:09,088 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:29:09,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891409086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:29:09,088 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:29:09,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891409086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:29:09,089 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:29:09,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891409088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:29:09,190 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:29:09,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891409189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:29:09,191 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:29:09,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891409189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:29:09,191 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:29:09,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891409189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:29:09,191 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:29:09,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891409190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:29:09,393 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:29:09,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891409392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:29:09,394 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:29:09,394 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:29:09,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891409392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:29:09,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891409392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:29:09,394 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:29:09,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891409393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:29:09,483 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-11T04:29:09,487 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412117da59b9319db4252920a655b76845969_697517215fc3b7180eb3ba48942407dc to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412117da59b9319db4252920a655b76845969_697517215fc3b7180eb3ba48942407dc
2024-12-11T04:29:09,487 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/73c9326c17cd401a8aa8167f59d3d858, store: [table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc]
2024-12-11T04:29:09,488 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/73c9326c17cd401a8aa8167f59d3d858 is 175, key is test_row_0/A:col10/1733891349072/Put/seqid=0
2024-12-11T04:29:09,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742504_1680 (size=31255)
2024-12-11T04:29:09,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=187
2024-12-11T04:29:09,616 INFO [Thread-2578 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 187 completed
2024-12-11T04:29:09,617 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-11T04:29:09,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=189, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=189, table=TestAcidGuarantees
2024-12-11T04:29:09,620 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=189, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=189, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-11T04:29:09,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=189
2024-12-11T04:29:09,620 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=189, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=189, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-11T04:29:09,620 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=190, ppid=189, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-11T04:29:09,697 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:29:09,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891409696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:29:09,697 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:29:09,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891409696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:29:09,698 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:29:09,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891409697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:29:09,698 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-11T04:29:09,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891409697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267
2024-12-11T04:29:09,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=189
2024-12-11T04:29:09,771 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267
2024-12-11T04:29:09,772 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=190
2024-12-11T04:29:09,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.
2024-12-11T04:29:09,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. as already flushing
2024-12-11T04:29:09,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.
2024-12-11T04:29:09,772 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] handler.RSProcedureHandler(58): pid=190
java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-11T04:29:09,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=190
java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-11T04:29:09,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=190
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
	at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
	at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
	at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:29:09,781 DEBUG [Thread-2585 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x410bf0c8 to 127.0.0.1:50078 2024-12-11T04:29:09,781 DEBUG [Thread-2587 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x67adb273 to 127.0.0.1:50078 2024-12-11T04:29:09,781 DEBUG [Thread-2585 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:29:09,781 DEBUG [Thread-2587 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:29:09,782 DEBUG [Thread-2581 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x06556601 to 127.0.0.1:50078 2024-12-11T04:29:09,782 DEBUG [Thread-2581 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:29:09,783 DEBUG [Thread-2583 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x458a85fd to 127.0.0.1:50078 2024-12-11T04:29:09,783 DEBUG [Thread-2583 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:29:09,783 DEBUG [Thread-2579 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6bb6288a to 127.0.0.1:50078 2024-12-11T04:29:09,783 DEBUG [Thread-2579 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:29:09,892 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=357, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/73c9326c17cd401a8aa8167f59d3d858 2024-12-11T04:29:09,897 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/a9f637ef28d74bb3bab1cc842effa4eb is 50, key is test_row_0/B:col10/1733891349072/Put/seqid=0 2024-12-11T04:29:09,900 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742505_1681 (size=12301) 2024-12-11T04:29:09,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=189 2024-12-11T04:29:09,924 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:29:09,924 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=190 2024-12-11T04:29:09,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:29:09,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. as already flushing 2024-12-11T04:29:09,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:29:09,925 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] handler.RSProcedureHandler(58): pid=190 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:29:09,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=190 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:29:09,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=190 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:29:10,076 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:29:10,077 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=190 2024-12-11T04:29:10,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 
2024-12-11T04:29:10,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. as already flushing 2024-12-11T04:29:10,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:29:10,077 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=190}] handler.RSProcedureHandler(58): pid=190 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:29:10,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=190 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:29:10,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=190 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:29:10,200 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:10,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59662 deadline: 1733891410200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:10,200 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:10,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59714 deadline: 1733891410200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:10,201 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:10,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59706 deadline: 1733891410201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:10,202 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-11T04:29:10,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59744 deadline: 1733891410202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:10,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=189 2024-12-11T04:29:10,229 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:29:10,229 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=190 2024-12-11T04:29:10,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:29:10,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. as already flushing 2024-12-11T04:29:10,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:29:10,229 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] handler.RSProcedureHandler(58): pid=190 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:29:10,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=190 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:29:10,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=190 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:29:10,300 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=357 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/a9f637ef28d74bb3bab1cc842effa4eb 2024-12-11T04:29:10,305 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/fd47a7807b3a476ab145fd0e56df5317 is 50, key is test_row_0/C:col10/1733891349072/Put/seqid=0 2024-12-11T04:29:10,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742506_1682 (size=12301) 2024-12-11T04:29:10,381 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:29:10,381 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=190 2024-12-11T04:29:10,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:29:10,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. as already flushing 2024-12-11T04:29:10,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:29:10,381 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] handler.RSProcedureHandler(58): pid=190 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:29:10,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=190 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:29:10,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=190 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:29:10,533 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:29:10,533 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=190 2024-12-11T04:29:10,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:29:10,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. as already flushing 2024-12-11T04:29:10,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:29:10,534 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=190}] handler.RSProcedureHandler(58): pid=190 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:29:10,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-1 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=190 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:29:10,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=190 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:29:10,685 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:29:10,685 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=190 2024-12-11T04:29:10,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 
2024-12-11T04:29:10,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. as already flushing 2024-12-11T04:29:10,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:29:10,686 ERROR [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] handler.RSProcedureHandler(58): pid=190 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T04:29:10,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-2 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=190 java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:29:10,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4114): Remote procedure failed, pid=190 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-11T04:29:10,708 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=357 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/fd47a7807b3a476ab145fd0e56df5317 2024-12-11T04:29:10,712 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/73c9326c17cd401a8aa8167f59d3d858 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/73c9326c17cd401a8aa8167f59d3d858 2024-12-11T04:29:10,714 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/73c9326c17cd401a8aa8167f59d3d858, entries=150, sequenceid=357, filesize=30.5 K 2024-12-11T04:29:10,715 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/a9f637ef28d74bb3bab1cc842effa4eb as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/a9f637ef28d74bb3bab1cc842effa4eb 2024-12-11T04:29:10,717 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/a9f637ef28d74bb3bab1cc842effa4eb, entries=150, sequenceid=357, filesize=12.0 K 2024-12-11T04:29:10,717 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/fd47a7807b3a476ab145fd0e56df5317 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/fd47a7807b3a476ab145fd0e56df5317 2024-12-11T04:29:10,719 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/fd47a7807b3a476ab145fd0e56df5317, entries=150, sequenceid=357, filesize=12.0 K 2024-12-11T04:29:10,720 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 697517215fc3b7180eb3ba48942407dc in 1648ms, sequenceid=357, compaction requested=true 2024-12-11T04:29:10,720 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 697517215fc3b7180eb3ba48942407dc: 2024-12-11T04:29:10,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 697517215fc3b7180eb3ba48942407dc:A, priority=-2147483648, current under compaction store size is 1 2024-12-11T04:29:10,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:29:10,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 697517215fc3b7180eb3ba48942407dc:B, priority=-2147483648, current under compaction store size is 2 2024-12-11T04:29:10,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:29:10,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 697517215fc3b7180eb3ba48942407dc:C, priority=-2147483648, current under compaction store size is 3 2024-12-11T04:29:10,720 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:29:10,720 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:29:10,720 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:29:10,721 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94515 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:29:10,721 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:29:10,721 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): 697517215fc3b7180eb3ba48942407dc/A is initiating minor compaction (all files) 2024-12-11T04:29:10,721 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1540): 697517215fc3b7180eb3ba48942407dc/B is initiating minor compaction (all files) 2024-12-11T04:29:10,721 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 697517215fc3b7180eb3ba48942407dc/A in TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:29:10,721 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 697517215fc3b7180eb3ba48942407dc/B in TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 
2024-12-11T04:29:10,721 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/6996845c523341a49bbf3ee6054ef5ab, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/fc5ad08c9d2e4e3a9f97d48aa0323a90, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/73c9326c17cd401a8aa8167f59d3d858] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp, totalSize=92.3 K 2024-12-11T04:29:10,721 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/ff1fdc5aef27492fb9d42764077295d3, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/1684be0bbfd5442fa29496c7804ded8d, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/a9f637ef28d74bb3bab1cc842effa4eb] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp, totalSize=36.8 K 2024-12-11T04:29:10,721 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:29:10,721 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 
files: [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/6996845c523341a49bbf3ee6054ef5ab, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/fc5ad08c9d2e4e3a9f97d48aa0323a90, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/73c9326c17cd401a8aa8167f59d3d858] 2024-12-11T04:29:10,722 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6996845c523341a49bbf3ee6054ef5ab, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1733891345816 2024-12-11T04:29:10,722 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting ff1fdc5aef27492fb9d42764077295d3, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1733891345816 2024-12-11T04:29:10,722 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting 1684be0bbfd5442fa29496c7804ded8d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1733891346857 2024-12-11T04:29:10,722 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting fc5ad08c9d2e4e3a9f97d48aa0323a90, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1733891346857 2024-12-11T04:29:10,722 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 73c9326c17cd401a8aa8167f59d3d858, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=357, earliestPutTs=1733891347954 2024-12-11T04:29:10,722 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] compactions.Compactor(224): Compacting a9f637ef28d74bb3bab1cc842effa4eb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=357, earliestPutTs=1733891347954 2024-12-11T04:29:10,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=189 2024-12-11T04:29:10,728 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 697517215fc3b7180eb3ba48942407dc#B#compaction#575 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:29:10,728 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/c877eeb38dc942cdadd70f0cf8c16f6a is 50, key is test_row_0/B:col10/1733891349072/Put/seqid=0 2024-12-11T04:29:10,731 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:29:10,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742507_1683 (size=13153) 2024-12-11T04:29:10,733 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412110aa2e461b2024e2dbdb16042db4e33a7_697517215fc3b7180eb3ba48942407dc store=[table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:29:10,759 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412110aa2e461b2024e2dbdb16042db4e33a7_697517215fc3b7180eb3ba48942407dc, store=[table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:29:10,759 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412110aa2e461b2024e2dbdb16042db4e33a7_697517215fc3b7180eb3ba48942407dc because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:29:10,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742508_1684 (size=4469) 2024-12-11T04:29:10,766 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 697517215fc3b7180eb3ba48942407dc#A#compaction#576 average throughput is 0.70 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:29:10,767 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/9b4034e7a3d64298a71d5ddc36a0dd96 is 175, key is test_row_0/A:col10/1733891349072/Put/seqid=0 2024-12-11T04:29:10,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742509_1685 (size=32107) 2024-12-11T04:29:10,778 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/9b4034e7a3d64298a71d5ddc36a0dd96 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/9b4034e7a3d64298a71d5ddc36a0dd96 2024-12-11T04:29:10,782 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 697517215fc3b7180eb3ba48942407dc/A of 697517215fc3b7180eb3ba48942407dc into 9b4034e7a3d64298a71d5ddc36a0dd96(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:29:10,782 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 697517215fc3b7180eb3ba48942407dc: 2024-12-11T04:29:10,782 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc., storeName=697517215fc3b7180eb3ba48942407dc/A, priority=13, startTime=1733891350720; duration=0sec 2024-12-11T04:29:10,782 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-11T04:29:10,782 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 697517215fc3b7180eb3ba48942407dc:A 2024-12-11T04:29:10,782 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-11T04:29:10,783 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-11T04:29:10,783 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1540): 697517215fc3b7180eb3ba48942407dc/C is initiating minor compaction (all files) 2024-12-11T04:29:10,783 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 697517215fc3b7180eb3ba48942407dc/C in TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 
2024-12-11T04:29:10,783 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/098b3065a6ed46ca91be3a5fc5c267cd, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/3c8c52a562264659ad3049dbf44de3c6, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/fd47a7807b3a476ab145fd0e56df5317] into tmpdir=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp, totalSize=36.7 K 2024-12-11T04:29:10,784 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 098b3065a6ed46ca91be3a5fc5c267cd, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1733891345816 2024-12-11T04:29:10,784 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3c8c52a562264659ad3049dbf44de3c6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1733891346857 2024-12-11T04:29:10,784 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] compactions.Compactor(224): Compacting fd47a7807b3a476ab145fd0e56df5317, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=357, earliestPutTs=1733891347954 2024-12-11T04:29:10,794 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 697517215fc3b7180eb3ba48942407dc#C#compaction#577 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-11T04:29:10,795 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/390cb782c9f746319392117368b73b6c is 50, key is test_row_0/C:col10/1733891349072/Put/seqid=0 2024-12-11T04:29:10,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742510_1686 (size=13119) 2024-12-11T04:29:10,802 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/390cb782c9f746319392117368b73b6c as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/390cb782c9f746319392117368b73b6c 2024-12-11T04:29:10,806 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 697517215fc3b7180eb3ba48942407dc/C of 697517215fc3b7180eb3ba48942407dc into 390cb782c9f746319392117368b73b6c(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
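The flush that follows (FlushRegionCallable under pid=190, a child of the FlushTableProcedure pid=189 whose client-side completion is acknowledged later in this log) is the server side of an admin-requested table flush. Below is a minimal, hypothetical client sketch of issuing such a flush; only the table name comes from this log, and the connection setup is an assumption.

    // Hypothetical client sketch of a table flush like the one executed as pid=189/190.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Flushes every region of the table; the master tracks it as a flush procedure.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }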
2024-12-11T04:29:10,806 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 697517215fc3b7180eb3ba48942407dc: 2024-12-11T04:29:10,806 INFO [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc., storeName=697517215fc3b7180eb3ba48942407dc/C, priority=13, startTime=1733891350720; duration=0sec 2024-12-11T04:29:10,807 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:29:10,807 DEBUG [RS:0;5f466b3719ec:39071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 697517215fc3b7180eb3ba48942407dc:C 2024-12-11T04:29:10,837 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:29:10,838 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39071 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=190 2024-12-11T04:29:10,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:29:10,838 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HRegion(2837): Flushing 697517215fc3b7180eb3ba48942407dc 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-11T04:29:10,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=A 2024-12-11T04:29:10,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:29:10,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=B 2024-12-11T04:29:10,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:29:10,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=C 2024-12-11T04:29:10,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:29:10,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412112763a8cbb4c24c13abadba5261344423_697517215fc3b7180eb3ba48942407dc is 50, key is test_row_0/A:col10/1733891349084/Put/seqid=0 2024-12-11T04:29:10,852 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742511_1687 (size=12454) 2024-12-11T04:29:11,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39071 {}] regionserver.HRegion(8581): Flush requested on 697517215fc3b7180eb3ba48942407dc 2024-12-11T04:29:11,009 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. as already flushing 2024-12-11T04:29:11,009 DEBUG [Thread-2576 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x150e08ed to 127.0.0.1:50078 2024-12-11T04:29:11,009 DEBUG [Thread-2576 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:29:11,135 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/c877eeb38dc942cdadd70f0cf8c16f6a as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/c877eeb38dc942cdadd70f0cf8c16f6a 2024-12-11T04:29:11,139 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 697517215fc3b7180eb3ba48942407dc/B of 697517215fc3b7180eb3ba48942407dc into c877eeb38dc942cdadd70f0cf8c16f6a(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-11T04:29:11,139 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 697517215fc3b7180eb3ba48942407dc: 2024-12-11T04:29:11,139 INFO [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc., storeName=697517215fc3b7180eb3ba48942407dc/B, priority=13, startTime=1733891350720; duration=0sec 2024-12-11T04:29:11,139 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-11T04:29:11,139 DEBUG [RS:0;5f466b3719ec:39071-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 697517215fc3b7180eb3ba48942407dc:B 2024-12-11T04:29:11,233 DEBUG [Thread-2570 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1584f18a to 127.0.0.1:50078 2024-12-11T04:29:11,233 DEBUG [Thread-2568 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x17327621 to 127.0.0.1:50078 2024-12-11T04:29:11,233 DEBUG [Thread-2572 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5b914bf4 to 127.0.0.1:50078 2024-12-11T04:29:11,233 DEBUG [Thread-2568 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:29:11,233 DEBUG [Thread-2570 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:29:11,233 DEBUG [Thread-2572 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:29:11,234 DEBUG [Thread-2574 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3f6a59e4 to 127.0.0.1:50078 2024-12-11T04:29:11,234 DEBUG [Thread-2574 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:29:11,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:29:11,255 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412112763a8cbb4c24c13abadba5261344423_697517215fc3b7180eb3ba48942407dc to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412112763a8cbb4c24c13abadba5261344423_697517215fc3b7180eb3ba48942407dc 2024-12-11T04:29:11,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/f132e548061b4209a19d6fb02ec941b1, store: [table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:29:11,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/f132e548061b4209a19d6fb02ec941b1 is 175, key is test_row_0/A:col10/1733891349084/Put/seqid=0 2024-12-11T04:29:11,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742512_1688 (size=31255) 2024-12-11T04:29:11,660 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=374, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/f132e548061b4209a19d6fb02ec941b1 2024-12-11T04:29:11,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/5e3b4575819e4042bd88249d84dc2e17 is 50, key is test_row_0/B:col10/1733891349084/Put/seqid=0 2024-12-11T04:29:11,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742513_1689 (size=12301) 2024-12-11T04:29:11,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=189 2024-12-11T04:29:12,070 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=374 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/5e3b4575819e4042bd88249d84dc2e17 2024-12-11T04:29:12,075 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/2e5b0b14f83a4c08bedf29dc693ca666 is 50, key is test_row_0/C:col10/1733891349084/Put/seqid=0 2024-12-11T04:29:12,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742514_1690 (size=12301) 2024-12-11T04:29:12,479 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=374 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/2e5b0b14f83a4c08bedf29dc693ca666 2024-12-11T04:29:12,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/f132e548061b4209a19d6fb02ec941b1 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/f132e548061b4209a19d6fb02ec941b1 2024-12-11T04:29:12,486 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/f132e548061b4209a19d6fb02ec941b1, entries=150, sequenceid=374, filesize=30.5 K 2024-12-11T04:29:12,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/5e3b4575819e4042bd88249d84dc2e17 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/5e3b4575819e4042bd88249d84dc2e17 2024-12-11T04:29:12,489 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/5e3b4575819e4042bd88249d84dc2e17, entries=150, sequenceid=374, filesize=12.0 K 2024-12-11T04:29:12,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/2e5b0b14f83a4c08bedf29dc693ca666 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/2e5b0b14f83a4c08bedf29dc693ca666 2024-12-11T04:29:12,492 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/2e5b0b14f83a4c08bedf29dc693ca666, entries=150, sequenceid=374, filesize=12.0 K 2024-12-11T04:29:12,493 INFO [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=33.54 KB/34350 for 697517215fc3b7180eb3ba48942407dc in 1655ms, sequenceid=374, compaction requested=false 2024-12-11T04:29:12,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.HRegion(2538): Flush status journal for 697517215fc3b7180eb3ba48942407dc: 2024-12-11T04:29:12,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:29:12,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/5f466b3719ec:0-0 {event_type=RS_FLUSH_REGIONS, pid=190}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=190 2024-12-11T04:29:12,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster(4106): Remote procedure done, pid=190 2024-12-11T04:29:12,495 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=190, resume processing ppid=189 2024-12-11T04:29:12,495 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=190, ppid=189, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.8740 sec 2024-12-11T04:29:12,496 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=189, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=189, table=TestAcidGuarantees in 2.8770 sec 2024-12-11T04:29:13,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=189 2024-12-11T04:29:13,724 INFO [Thread-2578 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 189 completed 2024-12-11T04:29:13,724 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-11T04:29:13,724 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 58 2024-12-11T04:29:13,724 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 57 2024-12-11T04:29:13,724 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 64 2024-12-11T04:29:13,724 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 55 2024-12-11T04:29:13,724 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 62 2024-12-11T04:29:13,724 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-11T04:29:13,724 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6679 2024-12-11T04:29:13,724 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6665 2024-12-11T04:29:13,724 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6558 2024-12-11T04:29:13,724 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6697 2024-12-11T04:29:13,724 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6671 2024-12-11T04:29:13,724 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-11T04:29:13,724 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-11T04:29:13,724 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7fc332d8 to 127.0.0.1:50078 2024-12-11T04:29:13,725 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:29:13,725 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-11T04:29:13,725 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-11T04:29:13,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=191, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-11T04:29:13,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-12-11T04:29:13,728 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733891353727"}]},"ts":"1733891353727"} 2024-12-11T04:29:13,728 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-11T04:29:13,730 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-11T04:29:13,730 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=192, ppid=191, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-11T04:29:13,731 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=193, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=697517215fc3b7180eb3ba48942407dc, UNASSIGN}] 2024-12-11T04:29:13,732 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=193, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=697517215fc3b7180eb3ba48942407dc, UNASSIGN 2024-12-11T04:29:13,732 INFO [PEWorker-5 
{}] assignment.RegionStateStore(202): pid=193 updating hbase:meta row=697517215fc3b7180eb3ba48942407dc, regionState=CLOSING, regionLocation=5f466b3719ec,39071,1733891180267 2024-12-11T04:29:13,733 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-11T04:29:13,733 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=194, ppid=193, state=RUNNABLE; CloseRegionProcedure 697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267}] 2024-12-11T04:29:13,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-12-11T04:29:13,884 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 5f466b3719ec,39071,1733891180267 2024-12-11T04:29:13,884 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] handler.UnassignRegionHandler(124): Close 697517215fc3b7180eb3ba48942407dc 2024-12-11T04:29:13,884 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-11T04:29:13,884 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.HRegion(1681): Closing 697517215fc3b7180eb3ba48942407dc, disabling compactions & flushes 2024-12-11T04:29:13,884 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:29:13,884 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 2024-12-11T04:29:13,884 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. after waiting 0 ms 2024-12-11T04:29:13,884 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 
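The unassign/close sequence above (pids 191 through 194) was started by the client request "disable TestAcidGuarantees" recorded a few lines earlier. A hypothetical client sketch of that call is shown below; only the table name is taken from this log, and the connection setup is an assumption.

    // Hypothetical client sketch of the disable request stored as DisableTableProcedure pid=191.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          if (!admin.isTableDisabled(table)) {
            // The master unassigns and closes each region before marking the table DISABLED.
            admin.disableTable(table);
          }
        }
      }
    }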
2024-12-11T04:29:13,884 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.HRegion(2837): Flushing 697517215fc3b7180eb3ba48942407dc 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-11T04:29:13,885 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=A 2024-12-11T04:29:13,885 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:29:13,885 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=B 2024-12-11T04:29:13,885 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:29:13,885 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 697517215fc3b7180eb3ba48942407dc, store=C 2024-12-11T04:29:13,885 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-11T04:29:13,889 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211e168d4168e6c42b28b9ec2b6325a094a_697517215fc3b7180eb3ba48942407dc is 50, key is test_row_0/A:col10/1733891351008/Put/seqid=0 2024-12-11T04:29:13,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742515_1691 (size=12454) 2024-12-11T04:29:14,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-12-11T04:29:14,293 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T04:29:14,296 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241211e168d4168e6c42b28b9ec2b6325a094a_697517215fc3b7180eb3ba48942407dc to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211e168d4168e6c42b28b9ec2b6325a094a_697517215fc3b7180eb3ba48942407dc 2024-12-11T04:29:14,296 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/5c9ca58efef74c77993811dbbeef1a24, store: [table=TestAcidGuarantees family=A region=697517215fc3b7180eb3ba48942407dc] 2024-12-11T04:29:14,297 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/5c9ca58efef74c77993811dbbeef1a24 is 175, key is test_row_0/A:col10/1733891351008/Put/seqid=0 2024-12-11T04:29:14,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742516_1692 (size=31255) 2024-12-11T04:29:14,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-12-11T04:29:14,700 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=383, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/5c9ca58efef74c77993811dbbeef1a24 2024-12-11T04:29:14,705 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/af1b3f15b8d949d989bae45468c2ee2d is 50, key is test_row_0/B:col10/1733891351008/Put/seqid=0 2024-12-11T04:29:14,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742517_1693 (size=12301) 2024-12-11T04:29:14,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-12-11T04:29:15,108 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=383 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/af1b3f15b8d949d989bae45468c2ee2d 2024-12-11T04:29:15,114 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/e080f2acf8464816872ded806faef8d3 is 50, key is test_row_0/C:col10/1733891351008/Put/seqid=0 2024-12-11T04:29:15,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742518_1694 (size=12301) 2024-12-11T04:29:15,518 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=383 (bloomFilter=true), 
to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/e080f2acf8464816872ded806faef8d3 2024-12-11T04:29:15,521 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/A/5c9ca58efef74c77993811dbbeef1a24 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/5c9ca58efef74c77993811dbbeef1a24 2024-12-11T04:29:15,524 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/5c9ca58efef74c77993811dbbeef1a24, entries=150, sequenceid=383, filesize=30.5 K 2024-12-11T04:29:15,524 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/B/af1b3f15b8d949d989bae45468c2ee2d as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/af1b3f15b8d949d989bae45468c2ee2d 2024-12-11T04:29:15,527 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/af1b3f15b8d949d989bae45468c2ee2d, entries=150, sequenceid=383, filesize=12.0 K 2024-12-11T04:29:15,528 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/.tmp/C/e080f2acf8464816872ded806faef8d3 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/e080f2acf8464816872ded806faef8d3 2024-12-11T04:29:15,530 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/e080f2acf8464816872ded806faef8d3, entries=150, sequenceid=383, filesize=12.0 K 2024-12-11T04:29:15,531 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 697517215fc3b7180eb3ba48942407dc in 1647ms, sequenceid=383, compaction requested=true 2024-12-11T04:29:15,531 DEBUG [StoreCloser-TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/4a44d0d5f9b84f12a7aa34e72697b2e9, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/9222775907c44102a265724456338d39, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/57a60ec820b64a44bc8bcc76da02adcd, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/da62eb92b9db4a29b921aedfbdd3b4b1, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/5becf625da754edcb0ce57ad03d96984, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/bd4db745a5f5432a94d490075e2b22a9, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/d082ec823e514b61ad921c6a8c99db0f, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/6197f54758f249da9d471bad5c2ab171, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/af34135ee36741d4bd7678623edc7dd7, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/63501d978bbd41e495fcc2298481ad58, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/c1281bcfd41e46d393d58f88297d4f73, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/d659c59b869049269b7665607289f0fe, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/620b317add274f728bd05aaec3dd03cd, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/b464f9f8f0f14431b90241621277618f, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/20b6a715b331436b903b7ce476a20cdf, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/9f420ff25c8d4d1c8eaec49e8ef73608, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/727961c027934a9caf0cc75c68c4b1c0, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/6636792ed9b040e0a7ba307d1d878185, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/058e752e1aeb43a78cb7a02671f92eaa, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/c66639df35be4e6ca4b11b7e972574e3, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/1f591a1b54f045009979577a36bc52c1, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/323d170a990e4cb6a1ec923013e6566f, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/6996845c523341a49bbf3ee6054ef5ab, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/fc5ad08c9d2e4e3a9f97d48aa0323a90, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/73c9326c17cd401a8aa8167f59d3d858] to archive 2024-12-11T04:29:15,532 DEBUG [StoreCloser-TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-11T04:29:15,534 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/da62eb92b9db4a29b921aedfbdd3b4b1 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/da62eb92b9db4a29b921aedfbdd3b4b1 2024-12-11T04:29:15,534 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/4a44d0d5f9b84f12a7aa34e72697b2e9 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/4a44d0d5f9b84f12a7aa34e72697b2e9 2024-12-11T04:29:15,534 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/57a60ec820b64a44bc8bcc76da02adcd to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/57a60ec820b64a44bc8bcc76da02adcd 2024-12-11T04:29:15,534 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/9222775907c44102a265724456338d39 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/9222775907c44102a265724456338d39 2024-12-11T04:29:15,535 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/bd4db745a5f5432a94d490075e2b22a9 to 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/bd4db745a5f5432a94d490075e2b22a9 2024-12-11T04:29:15,535 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/d082ec823e514b61ad921c6a8c99db0f to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/d082ec823e514b61ad921c6a8c99db0f 2024-12-11T04:29:15,535 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/6197f54758f249da9d471bad5c2ab171 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/6197f54758f249da9d471bad5c2ab171 2024-12-11T04:29:15,535 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/5becf625da754edcb0ce57ad03d96984 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/5becf625da754edcb0ce57ad03d96984 2024-12-11T04:29:15,536 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/af34135ee36741d4bd7678623edc7dd7 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/af34135ee36741d4bd7678623edc7dd7 2024-12-11T04:29:15,536 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/c1281bcfd41e46d393d58f88297d4f73 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/c1281bcfd41e46d393d58f88297d4f73 2024-12-11T04:29:15,536 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/d659c59b869049269b7665607289f0fe to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/d659c59b869049269b7665607289f0fe 2024-12-11T04:29:15,536 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/63501d978bbd41e495fcc2298481ad58 to 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/63501d978bbd41e495fcc2298481ad58 2024-12-11T04:29:15,537 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/620b317add274f728bd05aaec3dd03cd to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/620b317add274f728bd05aaec3dd03cd 2024-12-11T04:29:15,537 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/b464f9f8f0f14431b90241621277618f to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/b464f9f8f0f14431b90241621277618f 2024-12-11T04:29:15,537 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/20b6a715b331436b903b7ce476a20cdf to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/20b6a715b331436b903b7ce476a20cdf 2024-12-11T04:29:15,538 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/9f420ff25c8d4d1c8eaec49e8ef73608 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/9f420ff25c8d4d1c8eaec49e8ef73608 2024-12-11T04:29:15,538 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/6636792ed9b040e0a7ba307d1d878185 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/6636792ed9b040e0a7ba307d1d878185 2024-12-11T04:29:15,538 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/c66639df35be4e6ca4b11b7e972574e3 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/c66639df35be4e6ca4b11b7e972574e3 2024-12-11T04:29:15,538 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/058e752e1aeb43a78cb7a02671f92eaa to 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/058e752e1aeb43a78cb7a02671f92eaa 2024-12-11T04:29:15,539 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/6996845c523341a49bbf3ee6054ef5ab to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/6996845c523341a49bbf3ee6054ef5ab 2024-12-11T04:29:15,539 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/323d170a990e4cb6a1ec923013e6566f to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/323d170a990e4cb6a1ec923013e6566f 2024-12-11T04:29:15,539 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/727961c027934a9caf0cc75c68c4b1c0 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/727961c027934a9caf0cc75c68c4b1c0 2024-12-11T04:29:15,539 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/1f591a1b54f045009979577a36bc52c1 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/1f591a1b54f045009979577a36bc52c1 2024-12-11T04:29:15,539 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/fc5ad08c9d2e4e3a9f97d48aa0323a90 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/fc5ad08c9d2e4e3a9f97d48aa0323a90 2024-12-11T04:29:15,539 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/73c9326c17cd401a8aa8167f59d3d858 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/73c9326c17cd401a8aa8167f59d3d858 2024-12-11T04:29:15,540 DEBUG [StoreCloser-TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/d23a7928aca14975bb605c2378ca48fc, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/a0565cc8a1b942f79346ee2a41a0af50, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/0eeef0cd0b7b4a74ab2a178853c9779c, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/c957ca52cdfb4d31b560ab522c4f09cd, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/7ed18ab4932643db9313fca209206c96, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/f03de02559b946de9dd9406a1a98e064, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/aa10282e587748dc84e04cb33353086c, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/a621efc3be8d494586b1c8637fdc41c6, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/70e97473ba4f48008a4dd70fc705dda1, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/534c140c5d464abb9c65b882860c42e3, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/f3710bf23d0a43ab817d105bf875e822, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/03a9456292c7433894ebd0989860dd67, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/49e05ad891c549c48c9f0199ec33a855, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/71ae609f101a4fdba092ce122874fc47, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/8f8f7a9f4eab4436a83ae5796a556ec7, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/bb57b6c9597a4fc899e2b6606b8ee703, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/642002e7a94b406cb9ec71899616eb13, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/6074867de7e0466c82ef815733454af8, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/84ab138c71314f5483f169a27c617559, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/37cb900edac245909424deff4792285a, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/99f9e53c646d457bb712ec11ad3bbc9d, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/ff1fdc5aef27492fb9d42764077295d3, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/d41910f7e8f6479596d75c04ab9c251f, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/1684be0bbfd5442fa29496c7804ded8d, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/a9f637ef28d74bb3bab1cc842effa4eb] to archive 2024-12-11T04:29:15,541 DEBUG [StoreCloser-TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-11T04:29:15,543 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/d23a7928aca14975bb605c2378ca48fc to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/d23a7928aca14975bb605c2378ca48fc 2024-12-11T04:29:15,543 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/a0565cc8a1b942f79346ee2a41a0af50 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/a0565cc8a1b942f79346ee2a41a0af50 2024-12-11T04:29:15,543 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/aa10282e587748dc84e04cb33353086c to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/aa10282e587748dc84e04cb33353086c 2024-12-11T04:29:15,543 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/0eeef0cd0b7b4a74ab2a178853c9779c to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/0eeef0cd0b7b4a74ab2a178853c9779c 2024-12-11T04:29:15,543 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/a621efc3be8d494586b1c8637fdc41c6 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/a621efc3be8d494586b1c8637fdc41c6 
2024-12-11T04:29:15,543 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/7ed18ab4932643db9313fca209206c96 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/7ed18ab4932643db9313fca209206c96 2024-12-11T04:29:15,543 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/f03de02559b946de9dd9406a1a98e064 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/f03de02559b946de9dd9406a1a98e064 2024-12-11T04:29:15,543 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/c957ca52cdfb4d31b560ab522c4f09cd to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/c957ca52cdfb4d31b560ab522c4f09cd 2024-12-11T04:29:15,545 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/70e97473ba4f48008a4dd70fc705dda1 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/70e97473ba4f48008a4dd70fc705dda1 2024-12-11T04:29:15,545 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/03a9456292c7433894ebd0989860dd67 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/03a9456292c7433894ebd0989860dd67 2024-12-11T04:29:15,545 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/f3710bf23d0a43ab817d105bf875e822 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/f3710bf23d0a43ab817d105bf875e822 2024-12-11T04:29:15,545 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/534c140c5d464abb9c65b882860c42e3 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/534c140c5d464abb9c65b882860c42e3 2024-12-11T04:29:15,545 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/8f8f7a9f4eab4436a83ae5796a556ec7 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/8f8f7a9f4eab4436a83ae5796a556ec7 2024-12-11T04:29:15,545 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/bb57b6c9597a4fc899e2b6606b8ee703 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/bb57b6c9597a4fc899e2b6606b8ee703 2024-12-11T04:29:15,545 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/49e05ad891c549c48c9f0199ec33a855 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/49e05ad891c549c48c9f0199ec33a855 2024-12-11T04:29:15,545 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/71ae609f101a4fdba092ce122874fc47 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/71ae609f101a4fdba092ce122874fc47 2024-12-11T04:29:15,547 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/642002e7a94b406cb9ec71899616eb13 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/642002e7a94b406cb9ec71899616eb13 2024-12-11T04:29:15,547 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/6074867de7e0466c82ef815733454af8 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/6074867de7e0466c82ef815733454af8 2024-12-11T04:29:15,547 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/1684be0bbfd5442fa29496c7804ded8d to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/1684be0bbfd5442fa29496c7804ded8d 2024-12-11T04:29:15,547 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/37cb900edac245909424deff4792285a to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/37cb900edac245909424deff4792285a 2024-12-11T04:29:15,547 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/84ab138c71314f5483f169a27c617559 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/84ab138c71314f5483f169a27c617559 2024-12-11T04:29:15,547 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/d41910f7e8f6479596d75c04ab9c251f to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/d41910f7e8f6479596d75c04ab9c251f 2024-12-11T04:29:15,547 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/ff1fdc5aef27492fb9d42764077295d3 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/ff1fdc5aef27492fb9d42764077295d3 2024-12-11T04:29:15,547 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/99f9e53c646d457bb712ec11ad3bbc9d to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/99f9e53c646d457bb712ec11ad3bbc9d 2024-12-11T04:29:15,548 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/a9f637ef28d74bb3bab1cc842effa4eb to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/a9f637ef28d74bb3bab1cc842effa4eb 2024-12-11T04:29:15,549 DEBUG [StoreCloser-TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/128f1651d564484fb0305bfd8676ad0d, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/61be4930774948debfd6e362db5c5bca, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/a85df34644a94a748eaf9e2ffbe82f3b, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/4a7b63c883c34ebfa9db65af603384b2, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/bca4373974e141e685d6058a11a072d9, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/804c2d9e140340b39295b207dc68b3cc, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/0f7a085fad8b422db63cbcdcee8cdaf9, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/601e476f47b64fe6aaa683b2cd55399a, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/9ee1a632363846f9993a26b3c62cb310, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/1c81b1f6f0814adda719bcaa74163b05, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/163e5fb6b49f4604baf984068940d5f0, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/dd112c631d4f4026add4acefaf85647b, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/060d748b59f141e8b93ba548cf5cc1d8, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/3ab70be3ba4c47be843af7b6f8a92d71, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/4424d7babfcc4931a7095b951bbaca15, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/52200592c22c4af2be61a8cd39b8709d, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/9b8b353f3b074ebc95aa7c7dda13f0de, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/454d54f596a44f61922125b4a453e689, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/c876261693a24a90833d177aa4ffd4f4, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/22894d93d9aa49e9a335c6ae048b807a, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/098b3065a6ed46ca91be3a5fc5c267cd, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/c851067a5ce349afa795e6810444d576, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/3c8c52a562264659ad3049dbf44de3c6, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/fd47a7807b3a476ab145fd0e56df5317] to archive 2024-12-11T04:29:15,549 DEBUG [StoreCloser-TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-11T04:29:15,551 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/a85df34644a94a748eaf9e2ffbe82f3b to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/a85df34644a94a748eaf9e2ffbe82f3b 2024-12-11T04:29:15,551 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/804c2d9e140340b39295b207dc68b3cc to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/804c2d9e140340b39295b207dc68b3cc 2024-12-11T04:29:15,551 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/128f1651d564484fb0305bfd8676ad0d to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/128f1651d564484fb0305bfd8676ad0d 2024-12-11T04:29:15,552 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/601e476f47b64fe6aaa683b2cd55399a to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/601e476f47b64fe6aaa683b2cd55399a 2024-12-11T04:29:15,552 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/bca4373974e141e685d6058a11a072d9 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/bca4373974e141e685d6058a11a072d9 2024-12-11T04:29:15,552 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/61be4930774948debfd6e362db5c5bca to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/61be4930774948debfd6e362db5c5bca 2024-12-11T04:29:15,552 DEBUG [HFileArchiver-13 {}] 
backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/4a7b63c883c34ebfa9db65af603384b2 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/4a7b63c883c34ebfa9db65af603384b2 2024-12-11T04:29:15,552 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/0f7a085fad8b422db63cbcdcee8cdaf9 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/0f7a085fad8b422db63cbcdcee8cdaf9 2024-12-11T04:29:15,553 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/9ee1a632363846f9993a26b3c62cb310 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/9ee1a632363846f9993a26b3c62cb310 2024-12-11T04:29:15,553 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/060d748b59f141e8b93ba548cf5cc1d8 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/060d748b59f141e8b93ba548cf5cc1d8 2024-12-11T04:29:15,553 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/dd112c631d4f4026add4acefaf85647b to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/dd112c631d4f4026add4acefaf85647b 2024-12-11T04:29:15,553 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/1c81b1f6f0814adda719bcaa74163b05 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/1c81b1f6f0814adda719bcaa74163b05 2024-12-11T04:29:15,553 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/163e5fb6b49f4604baf984068940d5f0 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/163e5fb6b49f4604baf984068940d5f0 2024-12-11T04:29:15,553 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/3ab70be3ba4c47be843af7b6f8a92d71 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/3ab70be3ba4c47be843af7b6f8a92d71 2024-12-11T04:29:15,553 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/4424d7babfcc4931a7095b951bbaca15 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/4424d7babfcc4931a7095b951bbaca15 2024-12-11T04:29:15,554 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/52200592c22c4af2be61a8cd39b8709d to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/52200592c22c4af2be61a8cd39b8709d 2024-12-11T04:29:15,555 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/454d54f596a44f61922125b4a453e689 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/454d54f596a44f61922125b4a453e689 2024-12-11T04:29:15,555 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/9b8b353f3b074ebc95aa7c7dda13f0de to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/9b8b353f3b074ebc95aa7c7dda13f0de 2024-12-11T04:29:15,555 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/c876261693a24a90833d177aa4ffd4f4 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/c876261693a24a90833d177aa4ffd4f4 2024-12-11T04:29:15,555 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/3c8c52a562264659ad3049dbf44de3c6 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/3c8c52a562264659ad3049dbf44de3c6 2024-12-11T04:29:15,555 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/22894d93d9aa49e9a335c6ae048b807a to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/22894d93d9aa49e9a335c6ae048b807a 2024-12-11T04:29:15,555 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/098b3065a6ed46ca91be3a5fc5c267cd to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/098b3065a6ed46ca91be3a5fc5c267cd 2024-12-11T04:29:15,555 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/c851067a5ce349afa795e6810444d576 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/c851067a5ce349afa795e6810444d576 2024-12-11T04:29:15,555 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/fd47a7807b3a476ab145fd0e56df5317 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/fd47a7807b3a476ab145fd0e56df5317 2024-12-11T04:29:15,558 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/recovered.edits/386.seqid, newMaxSeqId=386, maxSeqId=4 2024-12-11T04:29:15,559 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc. 
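
Every HFileArchiver(620) entry above records the same transformation: a compacted store file is moved out of the region's live data directory into the mirror location under the archive root, with the namespace/table/region/column-family layout preserved. The following is a minimal sketch of that path mapping only (not the HFileArchiver implementation itself); the root directory, region, and file name are taken from the entries above, and Hadoop's Path class is the only dependency.

    import org.apache.hadoop.fs.Path;

    public class ArchivePathSketch {

      // Swap the live "data" subtree for "archive/data", keeping
      // namespace/table/region/family/file unchanged.
      static Path toArchivePath(Path rootDir, Path storeFile) {
        String root = rootDir.toUri().getPath();              // /user/jenkins/test-data/<run-id>
        String file = storeFile.toUri().getPath();            // <root>/data/default/<table>/<region>/<cf>/<name>
        String relative = file.substring(root.length() + 1);  // data/default/...
        return new Path(new Path(rootDir, "archive"), relative);
      }

      public static void main(String[] args) {
        Path root = new Path("hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5");
        Path store = new Path(root,
            "data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/d082ec823e514b61ad921c6a8c99db0f");
        // Prints <root>/archive/data/default/TestAcidGuarantees/<region>/A/<file>,
        // matching the destination logged for this store file above.
        System.out.println(toArchivePath(root, store));
      }
    }
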
2024-12-11T04:29:15,559 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] regionserver.HRegion(1635): Region close journal for 697517215fc3b7180eb3ba48942407dc: 2024-12-11T04:29:15,560 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION, pid=194}] handler.UnassignRegionHandler(170): Closed 697517215fc3b7180eb3ba48942407dc 2024-12-11T04:29:15,560 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=193 updating hbase:meta row=697517215fc3b7180eb3ba48942407dc, regionState=CLOSED 2024-12-11T04:29:15,562 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=194, resume processing ppid=193 2024-12-11T04:29:15,562 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=194, ppid=193, state=SUCCESS; CloseRegionProcedure 697517215fc3b7180eb3ba48942407dc, server=5f466b3719ec,39071,1733891180267 in 1.8280 sec 2024-12-11T04:29:15,563 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=193, resume processing ppid=192 2024-12-11T04:29:15,563 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=193, ppid=192, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=697517215fc3b7180eb3ba48942407dc, UNASSIGN in 1.8310 sec 2024-12-11T04:29:15,564 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=192, resume processing ppid=191 2024-12-11T04:29:15,564 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=192, ppid=191, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8340 sec 2024-12-11T04:29:15,565 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733891355565"}]},"ts":"1733891355565"} 2024-12-11T04:29:15,566 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-11T04:29:15,567 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-11T04:29:15,568 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=191, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.8430 sec 2024-12-11T04:29:15,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-12-11T04:29:15,830 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 191 completed 2024-12-11T04:29:15,831 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-11T04:29:15,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] procedure2.ProcedureExecutor(1098): Stored pid=195, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T04:29:15,832 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=195, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T04:29:15,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=195 2024-12-11T04:29:15,832 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=195, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T04:29:15,834 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc 2024-12-11T04:29:15,836 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A, FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B, FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C, FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/recovered.edits] 2024-12-11T04:29:15,838 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/5c9ca58efef74c77993811dbbeef1a24 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/5c9ca58efef74c77993811dbbeef1a24 2024-12-11T04:29:15,838 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/f132e548061b4209a19d6fb02ec941b1 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/f132e548061b4209a19d6fb02ec941b1 2024-12-11T04:29:15,838 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/9b4034e7a3d64298a71d5ddc36a0dd96 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/A/9b4034e7a3d64298a71d5ddc36a0dd96 2024-12-11T04:29:15,840 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/5e3b4575819e4042bd88249d84dc2e17 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/5e3b4575819e4042bd88249d84dc2e17 2024-12-11T04:29:15,841 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/af1b3f15b8d949d989bae45468c2ee2d to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/af1b3f15b8d949d989bae45468c2ee2d 
2024-12-11T04:29:15,841 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/c877eeb38dc942cdadd70f0cf8c16f6a to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/B/c877eeb38dc942cdadd70f0cf8c16f6a 2024-12-11T04:29:15,844 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/e080f2acf8464816872ded806faef8d3 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/e080f2acf8464816872ded806faef8d3 2024-12-11T04:29:15,844 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/2e5b0b14f83a4c08bedf29dc693ca666 to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/2e5b0b14f83a4c08bedf29dc693ca666 2024-12-11T04:29:15,844 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/390cb782c9f746319392117368b73b6c to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/C/390cb782c9f746319392117368b73b6c 2024-12-11T04:29:15,846 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/recovered.edits/386.seqid to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc/recovered.edits/386.seqid 2024-12-11T04:29:15,846 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/default/TestAcidGuarantees/697517215fc3b7180eb3ba48942407dc 2024-12-11T04:29:15,846 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-11T04:29:15,847 DEBUG [PEWorker-5 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-11T04:29:15,847 DEBUG [PEWorker-5 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-11T04:29:15,853 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412111183ae0ff22a4b879634814ca218c1ed_697517215fc3b7180eb3ba48942407dc to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412111183ae0ff22a4b879634814ca218c1ed_697517215fc3b7180eb3ba48942407dc 2024-12-11T04:29:15,853 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121122c5716728704f9db47f5e7f764e7932_697517215fc3b7180eb3ba48942407dc to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121122c5716728704f9db47f5e7f764e7932_697517215fc3b7180eb3ba48942407dc 2024-12-11T04:29:15,853 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412111f1b6a737a8442e7b66686d78e3f910a_697517215fc3b7180eb3ba48942407dc to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412111f1b6a737a8442e7b66686d78e3f910a_697517215fc3b7180eb3ba48942407dc 2024-12-11T04:29:15,853 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121123d166a1551248f6a210c5d36b899ffb_697517215fc3b7180eb3ba48942407dc to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121123d166a1551248f6a210c5d36b899ffb_697517215fc3b7180eb3ba48942407dc 2024-12-11T04:29:15,853 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211079e059a7d534ee18dc9a56f4d1b039f_697517215fc3b7180eb3ba48942407dc to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211079e059a7d534ee18dc9a56f4d1b039f_697517215fc3b7180eb3ba48942407dc 2024-12-11T04:29:15,853 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412112763a8cbb4c24c13abadba5261344423_697517215fc3b7180eb3ba48942407dc to 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412112763a8cbb4c24c13abadba5261344423_697517215fc3b7180eb3ba48942407dc 2024-12-11T04:29:15,853 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121128dbcbcfe125464b8c34b0eda5c9f6b4_697517215fc3b7180eb3ba48942407dc to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121128dbcbcfe125464b8c34b0eda5c9f6b4_697517215fc3b7180eb3ba48942407dc 2024-12-11T04:29:15,853 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412114b076ee541114d99af5538351f3fb5c3_697517215fc3b7180eb3ba48942407dc to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412114b076ee541114d99af5538351f3fb5c3_697517215fc3b7180eb3ba48942407dc 2024-12-11T04:29:15,854 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412117da59b9319db4252920a655b76845969_697517215fc3b7180eb3ba48942407dc to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412117da59b9319db4252920a655b76845969_697517215fc3b7180eb3ba48942407dc 2024-12-11T04:29:15,854 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412116aa9c30b581c4106b0b7b5e8f63db40a_697517215fc3b7180eb3ba48942407dc to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412116aa9c30b581c4106b0b7b5e8f63db40a_697517215fc3b7180eb3ba48942407dc 2024-12-11T04:29:15,855 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121196028cd218094eb7956ef2364358a650_697517215fc3b7180eb3ba48942407dc to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121196028cd218094eb7956ef2364358a650_697517215fc3b7180eb3ba48942407dc 2024-12-11T04:29:15,855 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211bec9febc67af4e7991ca04baad2f5feb_697517215fc3b7180eb3ba48942407dc to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211bec9febc67af4e7991ca04baad2f5feb_697517215fc3b7180eb3ba48942407dc 2024-12-11T04:29:15,855 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211bac0d1bb4c644de1884185ff9a7ee196_697517215fc3b7180eb3ba48942407dc to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211bac0d1bb4c644de1884185ff9a7ee196_697517215fc3b7180eb3ba48942407dc 2024-12-11T04:29:15,855 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211c95b49a128f8406ca73626d6731122b7_697517215fc3b7180eb3ba48942407dc to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211c95b49a128f8406ca73626d6731122b7_697517215fc3b7180eb3ba48942407dc 2024-12-11T04:29:15,855 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211c8fb818a5e154f63b07c05e84d2293a0_697517215fc3b7180eb3ba48942407dc to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211c8fb818a5e154f63b07c05e84d2293a0_697517215fc3b7180eb3ba48942407dc 2024-12-11T04:29:15,855 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211d909c8734edd4f308ef5a174d48eb2ae_697517215fc3b7180eb3ba48942407dc to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211d909c8734edd4f308ef5a174d48eb2ae_697517215fc3b7180eb3ba48942407dc 2024-12-11T04:29:15,856 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211df6eaaed49fb4753a32b32b78d4236fd_697517215fc3b7180eb3ba48942407dc to 
hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211df6eaaed49fb4753a32b32b78d4236fd_697517215fc3b7180eb3ba48942407dc 2024-12-11T04:29:15,856 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211e168d4168e6c42b28b9ec2b6325a094a_697517215fc3b7180eb3ba48942407dc to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211e168d4168e6c42b28b9ec2b6325a094a_697517215fc3b7180eb3ba48942407dc 2024-12-11T04:29:15,856 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211e60a380bcddb4f2f8c4fd64a5543d8f6_697517215fc3b7180eb3ba48942407dc to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211e60a380bcddb4f2f8c4fd64a5543d8f6_697517215fc3b7180eb3ba48942407dc 2024-12-11T04:29:15,856 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211eaf4edfcd1b449aaae3aae4efdc89e4a_697517215fc3b7180eb3ba48942407dc to hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241211eaf4edfcd1b449aaae3aae4efdc89e4a_697517215fc3b7180eb3ba48942407dc 2024-12-11T04:29:15,856 DEBUG [PEWorker-5 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-11T04:29:15,858 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=195, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T04:29:15,860 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-11T04:29:15,862 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-11T04:29:15,862 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=195, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T04:29:15,862 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 
2024-12-11T04:29:15,862 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733891355862"}]},"ts":"9223372036854775807"} 2024-12-11T04:29:15,864 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-11T04:29:15,864 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 697517215fc3b7180eb3ba48942407dc, NAME => 'TestAcidGuarantees,,1733891326709.697517215fc3b7180eb3ba48942407dc.', STARTKEY => '', ENDKEY => ''}] 2024-12-11T04:29:15,864 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 2024-12-11T04:29:15,864 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733891355864"}]},"ts":"9223372036854775807"} 2024-12-11T04:29:15,865 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-11T04:29:15,867 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=195, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-11T04:29:15,867 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=195, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 36 msec 2024-12-11T04:29:15,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46111 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=195 2024-12-11T04:29:15,933 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 195 completed 2024-12-11T04:29:15,943 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=248 (was 246) - Thread LEAK? -, OpenFileDescriptor=462 (was 454) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=412 (was 390) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=3437 (was 3499) 2024-12-11T04:29:15,943 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-11T04:29:15,944 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-11T04:29:15,944 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x76523d14 to 127.0.0.1:50078 2024-12-11T04:29:15,944 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:29:15,944 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-11T04:29:15,944 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=769703085, stopped=false 2024-12-11T04:29:15,944 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=5f466b3719ec,46111,1733891179518 2024-12-11T04:29:15,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39071-0x1007f5379a30001, quorum=127.0.0.1:50078, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T04:29:15,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46111-0x1007f5379a30000, quorum=127.0.0.1:50078, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T04:29:15,946 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-11T04:29:15,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39071-0x1007f5379a30001, quorum=127.0.0.1:50078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T04:29:15,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46111-0x1007f5379a30000, quorum=127.0.0.1:50078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T04:29:15,947 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:29:15,947 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46111-0x1007f5379a30000, quorum=127.0.0.1:50078, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T04:29:15,947 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39071-0x1007f5379a30001, quorum=127.0.0.1:50078, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T04:29:15,947 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '5f466b3719ec,39071,1733891180267' ***** 2024-12-11T04:29:15,947 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-11T04:29:15,948 INFO [RS:0;5f466b3719ec:39071 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-11T04:29:15,948 INFO [RS:0;5f466b3719ec:39071 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-11T04:29:15,948 INFO [RS:0;5f466b3719ec:39071 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
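The DELETE operation recorded above (Table Name: default:TestAcidGuarantees, procId: 195) is driven through the HBase Admin API before the minicluster shutdown begins. As a hedged illustration only, and not the actual test source, the following minimal Java sketch shows one way such a deletion could be issued from a client; the class name DeleteTableSketch and the use of the plain Admin interface (rather than the test utility's own helpers) are assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DeleteTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("TestAcidGuarantees");
      if (admin.isTableEnabled(tn)) {
        admin.disableTable(tn);   // a table must be disabled before it can be deleted
      }
      admin.deleteTable(tn);      // blocks until the master-side DeleteTableProcedure finishes
    }
  }
}

Admin.deleteTable() waits on the master procedure, which is consistent with the "Operation: DELETE ... procId: 195 completed" entry logged by HBaseAdmin$TableFuture above.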
2024-12-11T04:29:15,948 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-11T04:29:15,948 INFO [RS:0;5f466b3719ec:39071 {}] regionserver.HRegionServer(3579): Received CLOSE for 59414c8d1d0a5ff3a2f59cbb4ca8825b 2024-12-11T04:29:15,948 INFO [RS:0;5f466b3719ec:39071 {}] regionserver.HRegionServer(1224): stopping server 5f466b3719ec,39071,1733891180267 2024-12-11T04:29:15,948 DEBUG [RS:0;5f466b3719ec:39071 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:29:15,948 INFO [RS:0;5f466b3719ec:39071 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-11T04:29:15,949 INFO [RS:0;5f466b3719ec:39071 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-11T04:29:15,949 INFO [RS:0;5f466b3719ec:39071 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-11T04:29:15,949 INFO [RS:0;5f466b3719ec:39071 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-11T04:29:15,949 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 59414c8d1d0a5ff3a2f59cbb4ca8825b, disabling compactions & flushes 2024-12-11T04:29:15,949 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733891183839.59414c8d1d0a5ff3a2f59cbb4ca8825b. 2024-12-11T04:29:15,949 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733891183839.59414c8d1d0a5ff3a2f59cbb4ca8825b. 2024-12-11T04:29:15,949 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733891183839.59414c8d1d0a5ff3a2f59cbb4ca8825b. after waiting 0 ms 2024-12-11T04:29:15,949 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733891183839.59414c8d1d0a5ff3a2f59cbb4ca8825b. 
2024-12-11T04:29:15,949 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 59414c8d1d0a5ff3a2f59cbb4ca8825b 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-11T04:29:15,949 INFO [RS:0;5f466b3719ec:39071 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-12-11T04:29:15,949 DEBUG [RS:0;5f466b3719ec:39071 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740, 59414c8d1d0a5ff3a2f59cbb4ca8825b=hbase:namespace,,1733891183839.59414c8d1d0a5ff3a2f59cbb4ca8825b.} 2024-12-11T04:29:15,949 DEBUG [RS_CLOSE_META-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-11T04:29:15,949 INFO [RS_CLOSE_META-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-11T04:29:15,949 DEBUG [RS_CLOSE_META-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-11T04:29:15,949 DEBUG [RS_CLOSE_META-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-11T04:29:15,949 DEBUG [RS_CLOSE_META-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-11T04:29:15,949 INFO [RS_CLOSE_META-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=20.55 KB heapSize=35.87 KB 2024-12-11T04:29:15,950 DEBUG [RS:0;5f466b3719ec:39071 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 59414c8d1d0a5ff3a2f59cbb4ca8825b 2024-12-11T04:29:15,957 INFO [regionserver/5f466b3719ec:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T04:29:15,971 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/hbase/namespace/59414c8d1d0a5ff3a2f59cbb4ca8825b/.tmp/info/26be16a312054bee997ac1f51eda4d8d is 45, key is default/info:d/1733891185205/Put/seqid=0 2024-12-11T04:29:15,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742519_1695 (size=5037) 2024-12-11T04:29:15,976 DEBUG [RS_CLOSE_META-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/hbase/meta/1588230740/.tmp/info/a83baa096bc74a5285c5ca8891410d05 is 143, key is hbase:namespace,,1733891183839.59414c8d1d0a5ff3a2f59cbb4ca8825b./info:regioninfo/1733891185071/Put/seqid=0 2024-12-11T04:29:15,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742520_1696 (size=7725) 2024-12-11T04:29:16,150 DEBUG [RS:0;5f466b3719ec:39071 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 59414c8d1d0a5ff3a2f59cbb4ca8825b 2024-12-11T04:29:16,350 DEBUG [RS:0;5f466b3719ec:39071 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 59414c8d1d0a5ff3a2f59cbb4ca8825b 2024-12-11T04:29:16,375 INFO 
[RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/hbase/namespace/59414c8d1d0a5ff3a2f59cbb4ca8825b/.tmp/info/26be16a312054bee997ac1f51eda4d8d 2024-12-11T04:29:16,378 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/hbase/namespace/59414c8d1d0a5ff3a2f59cbb4ca8825b/.tmp/info/26be16a312054bee997ac1f51eda4d8d as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/hbase/namespace/59414c8d1d0a5ff3a2f59cbb4ca8825b/info/26be16a312054bee997ac1f51eda4d8d 2024-12-11T04:29:16,380 INFO [RS_CLOSE_META-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/hbase/meta/1588230740/.tmp/info/a83baa096bc74a5285c5ca8891410d05 2024-12-11T04:29:16,381 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/hbase/namespace/59414c8d1d0a5ff3a2f59cbb4ca8825b/info/26be16a312054bee997ac1f51eda4d8d, entries=2, sequenceid=6, filesize=4.9 K 2024-12-11T04:29:16,381 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 59414c8d1d0a5ff3a2f59cbb4ca8825b in 432ms, sequenceid=6, compaction requested=false 2024-12-11T04:29:16,385 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/hbase/namespace/59414c8d1d0a5ff3a2f59cbb4ca8825b/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-11T04:29:16,385 INFO [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733891183839.59414c8d1d0a5ff3a2f59cbb4ca8825b. 2024-12-11T04:29:16,385 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 59414c8d1d0a5ff3a2f59cbb4ca8825b: 2024-12-11T04:29:16,385 DEBUG [RS_CLOSE_REGION-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733891183839.59414c8d1d0a5ff3a2f59cbb4ca8825b. 
2024-12-11T04:29:16,398 DEBUG [RS_CLOSE_META-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/hbase/meta/1588230740/.tmp/rep_barrier/cff7051b28594650801880a6759320dc is 102, key is TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4./rep_barrier:/1733891212024/DeleteFamily/seqid=0 2024-12-11T04:29:16,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742521_1697 (size=6025) 2024-12-11T04:29:16,521 INFO [regionserver/5f466b3719ec:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-11T04:29:16,521 INFO [regionserver/5f466b3719ec:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-11T04:29:16,550 DEBUG [RS:0;5f466b3719ec:39071 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-11T04:29:16,751 DEBUG [RS:0;5f466b3719ec:39071 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-11T04:29:16,802 INFO [RS_CLOSE_META-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/hbase/meta/1588230740/.tmp/rep_barrier/cff7051b28594650801880a6759320dc 2024-12-11T04:29:16,819 DEBUG [RS_CLOSE_META-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/hbase/meta/1588230740/.tmp/table/38e03cc31ae44ca1b89be44ff8db81b2 is 96, key is TestAcidGuarantees,,1733891185401.c8c23c02526ae28f7a94d562fbd47bb4./table:/1733891212024/DeleteFamily/seqid=0 2024-12-11T04:29:16,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742522_1698 (size=5942) 2024-12-11T04:29:16,951 INFO [RS:0;5f466b3719ec:39071 {}] regionserver.HRegionServer(1599): Waiting on 1 regions to close 2024-12-11T04:29:16,951 DEBUG [RS:0;5f466b3719ec:39071 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-11T04:29:16,951 DEBUG [RS:0;5f466b3719ec:39071 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-11T04:29:17,151 DEBUG [RS:0;5f466b3719ec:39071 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-11T04:29:17,222 INFO [RS_CLOSE_META-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.08 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/hbase/meta/1588230740/.tmp/table/38e03cc31ae44ca1b89be44ff8db81b2 2024-12-11T04:29:17,226 DEBUG [RS_CLOSE_META-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/hbase/meta/1588230740/.tmp/info/a83baa096bc74a5285c5ca8891410d05 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/hbase/meta/1588230740/info/a83baa096bc74a5285c5ca8891410d05 2024-12-11T04:29:17,228 INFO [RS_CLOSE_META-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/hbase/meta/1588230740/info/a83baa096bc74a5285c5ca8891410d05, entries=22, sequenceid=93, filesize=7.5 K 2024-12-11T04:29:17,229 DEBUG [RS_CLOSE_META-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/hbase/meta/1588230740/.tmp/rep_barrier/cff7051b28594650801880a6759320dc as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/hbase/meta/1588230740/rep_barrier/cff7051b28594650801880a6759320dc 2024-12-11T04:29:17,232 INFO [RS_CLOSE_META-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/hbase/meta/1588230740/rep_barrier/cff7051b28594650801880a6759320dc, entries=6, sequenceid=93, filesize=5.9 K 2024-12-11T04:29:17,232 DEBUG [RS_CLOSE_META-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/hbase/meta/1588230740/.tmp/table/38e03cc31ae44ca1b89be44ff8db81b2 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/hbase/meta/1588230740/table/38e03cc31ae44ca1b89be44ff8db81b2 2024-12-11T04:29:17,234 INFO [RS_CLOSE_META-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/hbase/meta/1588230740/table/38e03cc31ae44ca1b89be44ff8db81b2, entries=9, sequenceid=93, filesize=5.8 K 2024-12-11T04:29:17,235 INFO [RS_CLOSE_META-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~20.55 KB/21040, heapSize ~35.82 KB/36680, currentSize=0 B/0 for 1588230740 in 1286ms, sequenceid=93, compaction requested=false 2024-12-11T04:29:17,238 DEBUG [RS_CLOSE_META-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/data/hbase/meta/1588230740/recovered.edits/96.seqid, newMaxSeqId=96, maxSeqId=1 2024-12-11T04:29:17,239 DEBUG [RS_CLOSE_META-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-11T04:29:17,239 INFO [RS_CLOSE_META-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-11T04:29:17,239 DEBUG [RS_CLOSE_META-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-11T04:29:17,239 DEBUG [RS_CLOSE_META-regionserver/5f466b3719ec:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-11T04:29:17,351 INFO [RS:0;5f466b3719ec:39071 {}] regionserver.HRegionServer(1250): stopping server 5f466b3719ec,39071,1733891180267; all regions closed. 
2024-12-11T04:29:17,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741834_1010 (size=26050) 2024-12-11T04:29:17,357 DEBUG [RS:0;5f466b3719ec:39071 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/oldWALs 2024-12-11T04:29:17,357 INFO [RS:0;5f466b3719ec:39071 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 5f466b3719ec%2C39071%2C1733891180267.meta:.meta(num 1733891183586) 2024-12-11T04:29:17,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741832_1008 (size=16168449) 2024-12-11T04:29:17,360 DEBUG [RS:0;5f466b3719ec:39071 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/oldWALs 2024-12-11T04:29:17,360 INFO [RS:0;5f466b3719ec:39071 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 5f466b3719ec%2C39071%2C1733891180267:(num 1733891182625) 2024-12-11T04:29:17,360 DEBUG [RS:0;5f466b3719ec:39071 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:29:17,360 INFO [RS:0;5f466b3719ec:39071 {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T04:29:17,360 INFO [RS:0;5f466b3719ec:39071 {}] hbase.ChoreService(370): Chore service for: regionserver/5f466b3719ec:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-12-11T04:29:17,361 INFO [regionserver/5f466b3719ec:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-11T04:29:17,361 INFO [RS:0;5f466b3719ec:39071 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:39071 2024-12-11T04:29:17,365 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39071-0x1007f5379a30001, quorum=127.0.0.1:50078, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/5f466b3719ec,39071,1733891180267 2024-12-11T04:29:17,365 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46111-0x1007f5379a30000, quorum=127.0.0.1:50078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T04:29:17,366 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [5f466b3719ec,39071,1733891180267] 2024-12-11T04:29:17,366 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 5f466b3719ec,39071,1733891180267; numProcessing=1 2024-12-11T04:29:17,367 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/5f466b3719ec,39071,1733891180267 already deleted, retry=false 2024-12-11T04:29:17,367 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 5f466b3719ec,39071,1733891180267 expired; onlineServers=0 2024-12-11T04:29:17,367 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '5f466b3719ec,46111,1733891179518' ***** 2024-12-11T04:29:17,367 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-11T04:29:17,367 DEBUG [M:0;5f466b3719ec:46111 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51bce639, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=5f466b3719ec/172.17.0.2:0 2024-12-11T04:29:17,368 INFO [M:0;5f466b3719ec:46111 {}] regionserver.HRegionServer(1224): stopping server 5f466b3719ec,46111,1733891179518 2024-12-11T04:29:17,368 INFO [M:0;5f466b3719ec:46111 {}] regionserver.HRegionServer(1250): stopping server 5f466b3719ec,46111,1733891179518; all regions closed. 2024-12-11T04:29:17,368 DEBUG [M:0;5f466b3719ec:46111 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T04:29:17,368 DEBUG [M:0;5f466b3719ec:46111 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-11T04:29:17,368 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-12-11T04:29:17,368 DEBUG [M:0;5f466b3719ec:46111 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-11T04:29:17,368 DEBUG [master/5f466b3719ec:0:becomeActiveMaster-HFileCleaner.large.0-1733891182343 {}] cleaner.HFileCleaner(306): Exit Thread[master/5f466b3719ec:0:becomeActiveMaster-HFileCleaner.large.0-1733891182343,5,FailOnTimeoutGroup] 2024-12-11T04:29:17,368 DEBUG [master/5f466b3719ec:0:becomeActiveMaster-HFileCleaner.small.0-1733891182345 {}] cleaner.HFileCleaner(306): Exit Thread[master/5f466b3719ec:0:becomeActiveMaster-HFileCleaner.small.0-1733891182345,5,FailOnTimeoutGroup] 2024-12-11T04:29:17,368 INFO [M:0;5f466b3719ec:46111 {}] hbase.ChoreService(370): Chore service for: master/5f466b3719ec:0 had [] on shutdown 2024-12-11T04:29:17,368 DEBUG [M:0;5f466b3719ec:46111 {}] master.HMaster(1733): Stopping service threads 2024-12-11T04:29:17,368 INFO [M:0;5f466b3719ec:46111 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-11T04:29:17,368 ERROR [M:0;5f466b3719ec:46111 {}] procedure2.ProcedureExecutor(722): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[IPC Client (509200408) connection to localhost/127.0.0.1:43317 from jenkins,5,PEWorkerGroup] Thread[IPC Parameter Sending Thread for localhost/127.0.0.1:43317,5,PEWorkerGroup] 2024-12-11T04:29:17,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46111-0x1007f5379a30000, quorum=127.0.0.1:50078, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-11T04:29:17,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46111-0x1007f5379a30000, quorum=127.0.0.1:50078, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T04:29:17,369 INFO [M:0;5f466b3719ec:46111 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-11T04:29:17,369 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46111-0x1007f5379a30000, quorum=127.0.0.1:50078, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-11T04:29:17,369 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-12-11T04:29:17,370 DEBUG [M:0;5f466b3719ec:46111 {}] zookeeper.ZKUtil(347): master:46111-0x1007f5379a30000, quorum=127.0.0.1:50078, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-11T04:29:17,370 WARN [M:0;5f466b3719ec:46111 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-11T04:29:17,370 INFO [M:0;5f466b3719ec:46111 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-11T04:29:17,370 INFO [M:0;5f466b3719ec:46111 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-11T04:29:17,370 DEBUG [M:0;5f466b3719ec:46111 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-11T04:29:17,370 INFO [M:0;5f466b3719ec:46111 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T04:29:17,370 DEBUG [M:0;5f466b3719ec:46111 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T04:29:17,370 DEBUG [M:0;5f466b3719ec:46111 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-11T04:29:17,370 DEBUG [M:0;5f466b3719ec:46111 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T04:29:17,370 INFO [M:0;5f466b3719ec:46111 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=793.57 KB heapSize=977 KB 2024-12-11T04:29:17,385 DEBUG [M:0;5f466b3719ec:46111 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c327730f012246559a21f9664dc32bf2 is 82, key is hbase:meta,,1/info:regioninfo/1733891183730/Put/seqid=0 2024-12-11T04:29:17,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742523_1699 (size=5672) 2024-12-11T04:29:17,388 INFO [M:0;5f466b3719ec:46111 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=2252 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c327730f012246559a21f9664dc32bf2 2024-12-11T04:29:17,407 DEBUG [M:0;5f466b3719ec:46111 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/88541f4e30ec47f5bdedf539cd92cd94 is 2283, key is \x00\x00\x00\x00\x00\x00\x00o/proc:d/1733891276258/Put/seqid=0 2024-12-11T04:29:17,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742524_1700 (size=47838) 2024-12-11T04:29:17,466 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39071-0x1007f5379a30001, quorum=127.0.0.1:50078, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T04:29:17,466 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39071-0x1007f5379a30001, quorum=127.0.0.1:50078, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T04:29:17,466 INFO [RS:0;5f466b3719ec:39071 {}] regionserver.HRegionServer(1307): Exiting; stopping=5f466b3719ec,39071,1733891180267; zookeeper connection closed. 2024-12-11T04:29:17,467 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7bcb0493 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7bcb0493 2024-12-11T04:29:17,467 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-11T04:29:17,811 INFO [M:0;5f466b3719ec:46111 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=793.01 KB at sequenceid=2252 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/88541f4e30ec47f5bdedf539cd92cd94 2024-12-11T04:29:17,813 INFO [M:0;5f466b3719ec:46111 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 88541f4e30ec47f5bdedf539cd92cd94 2024-12-11T04:29:17,828 DEBUG [M:0;5f466b3719ec:46111 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8435f132ffd443ac958e842f9628862b is 69, key is 5f466b3719ec,39071,1733891180267/rs:state/1733891182401/Put/seqid=0 2024-12-11T04:29:17,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073742525_1701 (size=5156) 2024-12-11T04:29:18,232 INFO [M:0;5f466b3719ec:46111 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=2252 (bloomFilter=true), to=hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8435f132ffd443ac958e842f9628862b 2024-12-11T04:29:18,235 DEBUG [M:0;5f466b3719ec:46111 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c327730f012246559a21f9664dc32bf2 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c327730f012246559a21f9664dc32bf2 2024-12-11T04:29:18,238 INFO [M:0;5f466b3719ec:46111 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c327730f012246559a21f9664dc32bf2, entries=8, sequenceid=2252, filesize=5.5 K 2024-12-11T04:29:18,238 DEBUG [M:0;5f466b3719ec:46111 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/88541f4e30ec47f5bdedf539cd92cd94 as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/88541f4e30ec47f5bdedf539cd92cd94 2024-12-11T04:29:18,241 INFO [M:0;5f466b3719ec:46111 {}] 
regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 88541f4e30ec47f5bdedf539cd92cd94 2024-12-11T04:29:18,241 INFO [M:0;5f466b3719ec:46111 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/88541f4e30ec47f5bdedf539cd92cd94, entries=195, sequenceid=2252, filesize=46.7 K 2024-12-11T04:29:18,241 DEBUG [M:0;5f466b3719ec:46111 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/8435f132ffd443ac958e842f9628862b as hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8435f132ffd443ac958e842f9628862b 2024-12-11T04:29:18,244 INFO [M:0;5f466b3719ec:46111 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:43317/user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/8435f132ffd443ac958e842f9628862b, entries=1, sequenceid=2252, filesize=5.0 K 2024-12-11T04:29:18,244 INFO [M:0;5f466b3719ec:46111 {}] regionserver.HRegion(3040): Finished flush of dataSize ~793.57 KB/812612, heapSize ~976.70 KB/1000144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 874ms, sequenceid=2252, compaction requested=false 2024-12-11T04:29:18,246 INFO [M:0;5f466b3719ec:46111 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T04:29:18,246 DEBUG [M:0;5f466b3719ec:46111 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-11T04:29:18,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45413 is added to blk_1073741830_1006 (size=960295) 2024-12-11T04:29:18,248 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(743): complete file /user/jenkins/test-data/5a0e7cb9-23ea-76dc-1c68-9c53a8e24ea5/MasterData/WALs/5f466b3719ec,46111,1733891179518/5f466b3719ec%2C46111%2C1733891179518.1733891181844 not finished, retry = 0 2024-12-11T04:29:18,348 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-11T04:29:18,348 INFO [M:0;5f466b3719ec:46111 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 2024-12-11T04:29:18,349 INFO [M:0;5f466b3719ec:46111 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:46111 2024-12-11T04:29:18,350 DEBUG [M:0;5f466b3719ec:46111 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/5f466b3719ec,46111,1733891179518 already deleted, retry=false 2024-12-11T04:29:18,452 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46111-0x1007f5379a30000, quorum=127.0.0.1:50078, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T04:29:18,452 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46111-0x1007f5379a30000, quorum=127.0.0.1:50078, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T04:29:18,452 INFO [M:0;5f466b3719ec:46111 {}] regionserver.HRegionServer(1307): Exiting; stopping=5f466b3719ec,46111,1733891179518; zookeeper connection closed. 
2024-12-11T04:29:18,457 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3054265c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T04:29:18,460 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@65902fec{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T04:29:18,460 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T04:29:18,460 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47db50b9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-11T04:29:18,460 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2ca71a25{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6d89658-89f2-0e5c-6e9b-90c5c540f28a/hadoop.log.dir/,STOPPED} 2024-12-11T04:29:18,464 WARN [BP-232036649-172.17.0.2-1733891176636 heartbeating to localhost/127.0.0.1:43317 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-11T04:29:18,464 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-11T04:29:18,464 WARN [BP-232036649-172.17.0.2-1733891176636 heartbeating to localhost/127.0.0.1:43317 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-232036649-172.17.0.2-1733891176636 (Datanode Uuid 155eeafc-ea12-4b95-970b-e440039dff40) service to localhost/127.0.0.1:43317 2024-12-11T04:29:18,464 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-11T04:29:18,467 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6d89658-89f2-0e5c-6e9b-90c5c540f28a/cluster_a7ae6713-c4ce-cfe9-805e-0174746b49b0/dfs/data/data1/current/BP-232036649-172.17.0.2-1733891176636 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T04:29:18,467 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6d89658-89f2-0e5c-6e9b-90c5c540f28a/cluster_a7ae6713-c4ce-cfe9-805e-0174746b49b0/dfs/data/data2/current/BP-232036649-172.17.0.2-1733891176636 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T04:29:18,468 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-11T04:29:18,475 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@b03fcff{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-11T04:29:18,475 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T04:29:18,475 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 
2024-12-11T04:29:18,476 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-11T04:29:18,476 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/e6d89658-89f2-0e5c-6e9b-90c5c540f28a/hadoop.log.dir/,STOPPED} 2024-12-11T04:29:18,492 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-11T04:29:18,625 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down
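The final entries ("Shutting down minicluster" / "Minicluster is down") are emitted by HBaseTestingUtility during test teardown. As a rough, hedged sketch of the surrounding test lifecycle, not the actual TestAcidGuaranteesWithAdaptivePolicy source, a harness along these lines would produce that start/stop sequence:

import org.apache.hadoop.hbase.HBaseTestingUtility;

public class MiniClusterLifecycleSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster();        // brings up ZooKeeper, HDFS and HBase, as in the earlier setup phase of this log
    try {
      // test body (table creation, concurrent writers/readers, verification) would run here
    } finally {
      util.shutdownMiniCluster();   // drives the shutdown sequence seen above, ending with "Minicluster is down"
    }
  }
}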